indexify 0.2.40__py3-none-any.whl → 0.2.42__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. indexify/cli.py +92 -52
  2. indexify/executor/agent.py +99 -187
  3. indexify/executor/api_objects.py +2 -8
  4. indexify/executor/downloader.py +129 -90
  5. indexify/executor/executor_tasks.py +15 -30
  6. indexify/executor/function_executor/function_executor.py +32 -0
  7. indexify/executor/function_executor/function_executor_factory.py +26 -0
  8. indexify/executor/function_executor/function_executor_map.py +91 -0
  9. indexify/executor/function_executor/process_function_executor.py +64 -0
  10. indexify/executor/function_executor/process_function_executor_factory.py +102 -0
  11. indexify/executor/function_worker.py +227 -184
  12. indexify/executor/runtime_probes.py +9 -8
  13. indexify/executor/task_fetcher.py +80 -0
  14. indexify/executor/task_reporter.py +18 -25
  15. indexify/executor/task_store.py +35 -16
  16. indexify/function_executor/function_executor_service.py +86 -0
  17. indexify/function_executor/handlers/run_function/function_inputs_loader.py +54 -0
  18. indexify/function_executor/handlers/run_function/handler.py +149 -0
  19. indexify/function_executor/handlers/run_function/request_validator.py +24 -0
  20. indexify/function_executor/handlers/run_function/response_helper.py +98 -0
  21. indexify/function_executor/initialize_request_validator.py +22 -0
  22. indexify/function_executor/proto/configuration.py +13 -0
  23. indexify/function_executor/proto/function_executor.proto +70 -0
  24. indexify/function_executor/proto/function_executor_pb2.py +53 -0
  25. indexify/function_executor/proto/function_executor_pb2.pyi +125 -0
  26. indexify/function_executor/proto/function_executor_pb2_grpc.py +163 -0
  27. indexify/function_executor/proto/message_validator.py +38 -0
  28. indexify/function_executor/server.py +31 -0
  29. indexify/functions_sdk/data_objects.py +0 -9
  30. indexify/functions_sdk/graph.py +10 -11
  31. indexify/functions_sdk/graph_definition.py +2 -2
  32. indexify/functions_sdk/image.py +35 -30
  33. indexify/functions_sdk/indexify_functions.py +5 -5
  34. indexify/http_client.py +15 -23
  35. indexify/logging.py +32 -0
  36. {indexify-0.2.40.dist-info → indexify-0.2.42.dist-info}/METADATA +3 -1
  37. indexify-0.2.42.dist-info/RECORD +53 -0
  38. indexify/executor/indexify_executor.py +0 -32
  39. indexify-0.2.40.dist-info/RECORD +0 -34
  40. {indexify-0.2.40.dist-info → indexify-0.2.42.dist-info}/LICENSE.txt +0 -0
  41. {indexify-0.2.40.dist-info → indexify-0.2.42.dist-info}/WHEEL +0 -0
  42. {indexify-0.2.40.dist-info → indexify-0.2.42.dist-info}/entry_points.txt +0 -0
indexify/executor/api_objects.py
@@ -1,8 +1,6 @@
  from typing import Any, Dict, List, Optional

- from pydantic import BaseModel, Json
-
- from indexify.functions_sdk.data_objects import IndexifyData
+ from pydantic import BaseModel


  class Task(BaseModel):
@@ -21,7 +19,7 @@ class ExecutorMetadata(BaseModel):
      executor_version: str
      addr: str
      image_name: str
-     image_version: int
+     image_hash: str
      labels: Dict[str, Any]


@@ -29,10 +27,6 @@ class RouterOutput(BaseModel):
      edges: List[str]


- class FnOutput(BaseModel):
-     payload: Json
-
-
  class TaskResult(BaseModel):
      router_output: Optional[RouterOutput] = None
      outcome: str
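
The ExecutorMetadata change above swaps the integer image_version field for an image_hash string. A minimal, self-contained sketch of the new shape (a local stand-in model mirroring only the fields visible in this hunk; the real model in the package may declare more fields, and the values below are made up):

# Local stand-in for illustration only, not the package's full ExecutorMetadata model.
from typing import Any, Dict
from pydantic import BaseModel

class ExecutorMetadataSketch(BaseModel):
    executor_version: str
    addr: str
    image_name: str
    image_hash: str  # 0.2.40 declared `image_version: int` here
    labels: Dict[str, Any]

print(
    ExecutorMetadataSketch(
        executor_version="0.2.42",
        addr="127.0.0.1:8900",
        image_name="indexify-executor",
        image_hash="sha256:0123abcd",  # hypothetical value
        labels={},
    )
)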

indexify/executor/downloader.py
@@ -1,22 +1,22 @@
+ import asyncio
  import os
- from typing import Optional
+ from typing import Any, Optional

  import httpx
  import structlog
- from pydantic import BaseModel

- from indexify.functions_sdk.data_objects import IndexifyData
+ from indexify.function_executor.proto.function_executor_pb2 import (
+     SerializedObject,
+ )

  from ..common_util import get_httpx_client
- from ..functions_sdk.object_serializer import JsonSerializer, get_serializer
  from .api_objects import Task

- logger = structlog.get_logger(module=__name__)

-
- class DownloadedInputs(BaseModel):
-     input: IndexifyData
-     init_value: Optional[IndexifyData] = None
+ class DownloadedInputs:
+     def __init__(self, input: SerializedObject, init_value: Optional[SerializedObject]):
+         self.input = input
+         self.init_value = init_value


  class Downloader:
@@ -24,103 +24,142 @@ class Downloader:
          self, code_path: str, base_url: str, config_path: Optional[str] = None
      ):
          self.code_path = code_path
-         self.base_url = base_url
-         self._client = get_httpx_client(config_path)
+         self._base_url = base_url
+         self._client = get_httpx_client(config_path, make_async=True)
+
+     async def download_graph(self, task: Task) -> SerializedObject:
+         # Cache graph to reduce load on the server.
+         graph_path = os.path.join(
+             self.code_path,
+             "graph_cache",
+             task.namespace,
+             f"{task.compute_graph}.{task.graph_version}",
+         )
+         # Filesystem operations are synchronous.
+         # Run in a separate thread to not block the main event loop.
+         graph: Optional[SerializedObject] = await asyncio.to_thread(
+             self._read_cached_graph, graph_path
+         )
+         if graph is not None:
+             return graph
+
+         logger = self._task_logger(task)
+         graph: SerializedObject = await self._fetch_graph(task, logger)
+         # Filesystem operations are synchronous.
+         # Run in a separate thread to not block the main event loop.
+         # We don't need to wait for the write completion so we use create_task.
+         asyncio.create_task(
+             asyncio.to_thread(self._write_cached_graph, task, graph_path, graph)
+         )

-     async def download_graph(self, namespace: str, name: str, version: int) -> str:
-         path = os.path.join(self.code_path, namespace, f"{name}.{version}")
+         return graph
+
+     def _read_cached_graph(self, path: str) -> Optional[SerializedObject]:
+         if not os.path.exists(path):
+             return None
+
+         with open(path, "rb") as f:
+             return SerializedObject.FromString(f.read())
+
+     def _write_cached_graph(
+         self, task: Task, path: str, graph: SerializedObject
+     ) -> None:
          if os.path.exists(path):
-             return path
+             # Another task already cached the graph.
+             return None
+
+         tmp_path = os.path.join(self.code_path, "task_graph_cache", task.id)
+         os.makedirs(os.path.dirname(tmp_path), exist_ok=True)
+         with open(tmp_path, "wb") as f:
+             f.write(graph.SerializeToString())
+         os.makedirs(os.path.dirname(path), exist_ok=True)
+         # Atomically rename the fully written file at tmp path.
+         # This allows us to not use any locking because file link/unlink
+         # are atomic operations at filesystem level.
+         os.replace(tmp_path, path)
+
+     async def download_inputs(self, task: Task) -> DownloadedInputs:
+         logger = self._task_logger(task)
+
+         input: SerializedObject
+         first_function_in_graph = task.invocation_id == task.input_key.split("|")[-1]
+         if first_function_in_graph:
+             # The first function in the graph gets its input from the graph invocation payload.
+             input = await self._fetch_graph_invocation_payload(task, logger)
+         else:
+             input = await self._fetch_function_input(task, logger)
+
+         init_value: Optional[SerializedObject] = None
+         if task.reducer_output_id is not None:
+             init_value = await self._fetch_function_init_value(task, logger)
+
+         return DownloadedInputs(input=input, init_value=init_value)

-         logger.info(
-             "downloading graph", namespace=namespace, name=name, version=version
+     def _task_logger(self, task: Task) -> Any:
+         return structlog.get_logger(
+             module=__name__,
+             namespace=task.namespace,
+             name=task.compute_graph,
+             version=task.graph_version,
+             task_id=task.id,
          )
-         response = self._client.get(
-             f"{self.base_url}/internal/namespaces/{namespace}/compute_graphs/{name}/code"
+
+     async def _fetch_graph(self, task: Task, logger: Any) -> SerializedObject:
+         """Downloads the compute graph for the task and returns it."""
+         return await self._fetch_url(
+             url=f"{self._base_url}/internal/namespaces/{task.namespace}/compute_graphs/{task.compute_graph}/versions/{task.graph_version}/code",
+             resource_description=f"compute graph: {task.compute_graph}",
+             logger=logger,
          )
-         try:
-             response.raise_for_status()
-         except httpx.HTTPStatusError as e:
-             logger.error(
-                 "failed to download graph",
-                 namespace=namespace,
-                 name=name,
-                 version=version,
-                 error=response.text,
-             )
-             raise

-         os.makedirs(os.path.dirname(path), exist_ok=True)
-         with open(path, "wb") as f:
-             f.write(response.content)
-         return path
-
-     async def download_input(self, task: Task) -> DownloadedInputs:
-         input_id = task.input_key.split("|")[-1]
-         if task.invocation_id == input_id:
-             url = f"{self.base_url}/namespaces/{task.namespace}/compute_graphs/{task.compute_graph}/invocations/{task.invocation_id}/payload"
-         else:
-             url = f"{self.base_url}/internal/fn_outputs/{task.input_key}"
+     async def _fetch_graph_invocation_payload(
+         self, task: Task, logger: Any
+     ) -> SerializedObject:
+         return await self._fetch_url(
+             url=f"{self._base_url}/namespaces/{task.namespace}/compute_graphs/{task.compute_graph}/invocations/{task.invocation_id}/payload",
+             resource_description=f"graph invocation payload: {task.invocation_id}",
+             logger=logger,
+         )

-         reducer_url = None
-         if task.reducer_output_id:
-             reducer_url = f"{self.base_url}/namespaces/{task.namespace}/compute_graphs/{task.compute_graph}/invocations/{task.invocation_id}/fn/{task.compute_fn}/output/{task.reducer_output_id}"
+     async def _fetch_function_input(self, task: Task, logger: Any) -> SerializedObject:
+         return await self._fetch_url(
+             url=f"{self._base_url}/internal/fn_outputs/{task.input_key}",
+             resource_description=f"function input: {task.input_key}",
+             logger=logger,
+         )

-         logger.info("downloading input", url=url, reducer_url=reducer_url)
-         response = self._client.get(url)
+     async def _fetch_function_init_value(
+         self, task: Task, logger: Any
+     ) -> SerializedObject:
+         return await self._fetch_url(
+             url=f"{self._base_url}/namespaces/{task.namespace}/compute_graphs/{task.compute_graph}"
+             f"/invocations/{task.invocation_id}/fn/{task.compute_fn}/output/{task.reducer_output_id}",
+             resource_description=f"reducer output: {task.reducer_output_id}",
+             logger=logger,
+         )

+     async def _fetch_url(
+         self, url: str, resource_description: str, logger: Any
+     ) -> SerializedObject:
+         logger.info(f"fetching {resource_description}", url=url)
+         response = await self._client.get(url)
          try:
              response.raise_for_status()
          except httpx.HTTPStatusError as e:
              logger.error(
-                 "failed to download input",
-                 url=url,
-                 reducer_url=reducer_url,
+                 f"failed to download {resource_description}",
                  error=response.text,
+                 exc_info=e,
              )
              raise

-         encoder = (
-             "json"
-             if response.headers["content-type"] == JsonSerializer.content_type
-             else "cloudpickle"
-         )
-         if task.invocation_id == input_id:
-             return DownloadedInputs(
-                 input=IndexifyData(
-                     payload=response.content, id=input_id, encoder=encoder
-                 ),
-             )
-
-         input_payload = response.content
-
-         if reducer_url:
-             response = self._client.get(reducer_url)
-             try:
-                 response.raise_for_status()
-                 init_value = response.content
-             except httpx.HTTPStatusError as e:
-                 logger.error(
-                     "failed to download reducer output",
-                     url=reducer_url,
-                     error=response.text,
-                 )
-                 raise
-             return DownloadedInputs(
-                 input=IndexifyData(
-                     input_id=task.invocation_id,
-                     payload=input_payload,
-                     encoder=encoder,
-                 ),
-                 init_value=IndexifyData(
-                     input_id=task.invocation_id, payload=init_value, encoder=encoder
-                 ),
+         # We're hardcoding the content type currently used by the Python SDK. It might change in the future.
+         # There's no other way for now to determine whether the response is bytes or a string.
+         if response.headers["content-type"] == "application/octet-stream":
+             return SerializedObject(
+                 bytes=response.content, content_type=response.headers["content-type"]
              )
-
-         return DownloadedInputs(
-             input=IndexifyData(
-                 input_id=task.invocation_id,
-                 payload=input_payload,
-                 encoder=encoder,
+         else:
+             return SerializedObject(
+                 string=response.text, content_type=response.headers["content-type"]
              )
-         )
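
The new download_graph caches serialized graphs on disk and relies on a write-to-temp-then-os.replace() step so concurrent tasks never need a lock. A standalone sketch of that pattern, with hypothetical paths and payload:

import os

def cache_bytes_atomically(tmp_path: str, final_path: str, payload: bytes) -> None:
    # Write the complete payload to a temporary file first.
    os.makedirs(os.path.dirname(tmp_path), exist_ok=True)
    with open(tmp_path, "wb") as f:
        f.write(payload)
    os.makedirs(os.path.dirname(final_path), exist_ok=True)
    # os.replace() is atomic on the same filesystem, so readers see either the old
    # cache entry (or none) or the complete new one, never a partial write.
    os.replace(tmp_path, final_path)

cache_bytes_atomically(
    "/tmp/graph_cache_tmp/task-1",         # hypothetical temporary path
    "/tmp/graph_cache/namespace/graph.1",  # hypothetical final cache path
    b"serialized graph",
)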

indexify/executor/executor_tasks.py
@@ -1,73 +1,58 @@
  import asyncio
- from typing import Optional

- from indexify.functions_sdk.data_objects import IndexifyData
+ from pydantic import BaseModel

  from .api_objects import Task
  from .downloader import Downloader
- from .function_worker import FunctionWorker
+ from .function_worker import FunctionWorker, FunctionWorkerInput


  class DownloadGraphTask(asyncio.Task):
      def __init__(
          self,
          *,
-         task: Task,
+         function_worker_input: FunctionWorkerInput,
          downloader: Downloader,
          **kwargs,
      ):
          kwargs["name"] = "download_graph"
          kwargs["loop"] = asyncio.get_event_loop()
          super().__init__(
-             downloader.download_graph(
-                 task.namespace, task.compute_graph, task.graph_version
-             ),
+             downloader.download_graph(function_worker_input.task),
              **kwargs,
          )
-         self.task = task
+         self.function_worker_input = function_worker_input


- class DownloadInputTask(asyncio.Task):
+ class DownloadInputsTask(asyncio.Task):
      def __init__(
          self,
          *,
-         task: Task,
+         function_worker_input: FunctionWorkerInput,
          downloader: Downloader,
          **kwargs,
      ):
-         kwargs["name"] = "download_input"
+         kwargs["name"] = "download_inputs"
          kwargs["loop"] = asyncio.get_event_loop()
          super().__init__(
-             downloader.download_input(task),
+             downloader.download_inputs(function_worker_input.task),
              **kwargs,
          )
-         self.task = task
+         self.function_worker_input = function_worker_input


- class ExtractTask(asyncio.Task):
+ class RunTask(asyncio.Task):
      def __init__(
          self,
          *,
          function_worker: FunctionWorker,
-         task: Task,
-         input: IndexifyData,
-         init_value: Optional[IndexifyData] = None,
-         code_path: str,
+         function_worker_input: FunctionWorkerInput,
          **kwargs,
      ):
-         kwargs["name"] = "run_function"
+         kwargs["name"] = "run_task"
          kwargs["loop"] = asyncio.get_event_loop()
          super().__init__(
-             function_worker.async_submit(
-                 namespace=task.namespace,
-                 graph_name=task.compute_graph,
-                 fn_name=task.compute_fn,
-                 input=input,
-                 init_value=init_value,
-                 code_path=code_path,
-                 version=task.graph_version,
-                 invocation_id=task.invocation_id,
-             ),
+             function_worker.run(function_worker_input),
              **kwargs,
          )
-         self.task = task
+         self.function_worker_input = function_worker_input
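
All three classes follow the same pattern: subclass asyncio.Task, give the task a fixed name, wrap the coroutine to run, and keep the FunctionWorkerInput on the task object so a completed task can be matched back to its work item. A generic sketch of that pattern, with illustrative names that are not part of the package API:

import asyncio

class TrackedTask(asyncio.Task):
    """asyncio.Task subclass that remembers which work item it belongs to."""

    def __init__(self, *, work_item: str, coro, **kwargs):
        kwargs["name"] = "tracked_task"
        kwargs["loop"] = asyncio.get_event_loop()
        super().__init__(coro, **kwargs)
        self.work_item = work_item

async def double(x: int) -> int:
    await asyncio.sleep(0)
    return x * 2

async def main():
    task = TrackedTask(work_item="task-123", coro=double(21))
    done, _ = await asyncio.wait([task])
    for finished in done:
        print(finished.work_item, finished.result())  # task-123 42

asyncio.run(main())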

indexify/executor/function_executor/function_executor.py (new file)
@@ -0,0 +1,32 @@
+ from typing import Any, Optional
+
+ import grpc
+
+ # Timeout for Function Executor startup in seconds.
+ # The timeout is counted from the moment when the Function Executor environment
+ # is fully prepared and the Function Executor gets started.
+ FUNCTION_EXECUTOR_READY_TIMEOUT_SEC = 5
+
+
+ class FunctionExecutor:
+     """Abstract interface for a FunctionExecutor.
+
+     FunctionExecutor is a class that executes tasks for a particular function.
+     FunctionExecutor implements the gRPC server that listens for incoming tasks.
+     """
+
+     async def channel(self) -> grpc.aio.Channel:
+         """Returns an async gRPC channel to the Function Executor.
+
+         The channel is in ready state and can be used for all gRPC communication with the Function Executor.
+         It can be shared among coroutines running in the same event loop in the same thread. Users should
+         not close the channel as it's reused for all requests.
+         Raises an Exception if an error occurred."""
+         raise NotImplementedError
+
+     def state(self) -> Optional[Any]:
+         """Returns an optional state object.
+
+         The state object can be used to associate any data with the Function Executor.
+         """
+         raise NotImplementedError
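
Callers of this interface are expected to fetch the shared channel and build a gRPC stub on top of it, as FunctionExecutorMap does further below. A rough sketch, assuming `executor` is some concrete FunctionExecutor and that an empty InitializeRequest is acceptable for illustration (a real request is populated with the function's details):

# Rough sketch; `executor` is assumed to be a concrete FunctionExecutor implementation.
from indexify.function_executor.proto.function_executor_pb2 import InitializeRequest
from indexify.function_executor.proto.function_executor_pb2_grpc import (
    FunctionExecutorStub,
)

async def initialize(executor) -> None:
    channel = await executor.channel()  # shared, ready-to-use grpc.aio channel
    stub = FunctionExecutorStub(channel)
    # Empty request used for brevity; real requests carry graph and function data.
    response = await stub.initialize(InitializeRequest())
    if not response.success:
        raise RuntimeError("initialize RPC failed at the Function Executor")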

indexify/executor/function_executor/function_executor_factory.py (new file)
@@ -0,0 +1,26 @@
+ from typing import Any, Optional
+
+ from .function_executor import FunctionExecutor
+
+
+ class FunctionExecutorFactory:
+     """Abstract class for creating function executors."""
+
+     async def create(
+         self, logger: Any, state: Optional[Any] = None
+     ) -> FunctionExecutor:
+         """Creates a new FunctionExecutor.
+
+         Args:
+             logger: logger to be used during the function.
+             state: state to be stored in the FunctionExecutor."""
+         raise NotImplementedError()
+
+     async def destroy(self, executor: FunctionExecutor, logger: Any) -> None:
+         """Destroys the FunctionExecutor and releases all its resources.
+
+         Args:
+             logger: logger to be used during the function.
+         The FunctionExecutor and customer code running inside of it are not notified about the destruction.
+         Never raises any Exceptions."""
+         raise NotImplementedError

indexify/executor/function_executor/function_executor_map.py (new file)
@@ -0,0 +1,91 @@
+ import asyncio
+ from typing import Any, Dict, Optional
+
+ import grpc
+
+ from indexify.function_executor.proto.function_executor_pb2 import (
+     InitializeRequest,
+     InitializeResponse,
+ )
+ from indexify.function_executor.proto.function_executor_pb2_grpc import (
+     FunctionExecutorStub,
+ )
+
+ from .function_executor import FunctionExecutor
+ from .function_executor_factory import FunctionExecutorFactory
+
+
+ class FunctionExecutorMap:
+     """A map of ID => FunctionExecutor.
+
+     The map is safe to use by multiple coroutines running in an event loop on the same thread
+     but it's not thread safe (can't be used from different threads concurrently)."""
+
+     def __init__(self, factory: FunctionExecutorFactory):
+         self._factory = factory
+         # Map of initialized Function Executors ready to run tasks.
+         # function ID -> FunctionExecutor
+         self._executors: Dict[str, FunctionExecutor] = {}
+         # We have to do all operations under this lock because we need to ensure
+         # that we don't create more Function Executors than required. This is important
+         # e.g. when a Function Executor is using the only available GPU on the machine.
+         # We can get rid of this locking in the future once we assign GPUs explicitly to Function Executors.
+         # Running the full test suite with all this locking removed doesn't make it run faster,
+         # so it looks like this full locking doesn't really result in any performance penalty so far.
+         self._executors_lock = asyncio.Lock()
+
+     async def get_or_create(
+         self,
+         id: str,
+         initialize_request: InitializeRequest,
+         initial_state: Any,
+         logger: Any,
+     ) -> FunctionExecutor:
+         """Returns a FunctionExecutor for the given ID.
+
+         If the FunctionExecutor for the given ID doesn't exist then it will be created and initialized.
+         Raises an exception if the FunctionExecutor creation or initialization failed.
+         """
+         async with self._executors_lock:
+             # Use the existing Function Executor if it's already initialized.
+             if id in self._executors:
+                 return self._executors[id]
+
+             executor: Optional[FunctionExecutor] = None
+             try:
+                 executor = await self._factory.create(logger, state=initial_state)
+                 channel: grpc.aio.Channel = await executor.channel()
+                 stub: FunctionExecutorStub = FunctionExecutorStub(channel)
+                 initialize_response: InitializeResponse = await stub.initialize(
+                     initialize_request
+                 )
+                 if not initialize_response.success:
+                     raise Exception("initialize RPC failed at function executor")
+             except Exception:
+                 if executor is not None:
+                     await self._factory.destroy(executor=executor, logger=logger)
+                 # Function Executor creation or initialization failed.
+                 raise
+
+             self._executors[id] = executor
+             return executor
+
+     async def delete(
+         self, id: str, function_executor: FunctionExecutor, logger: Any
+     ) -> None:
+         """Deletes the FunctionExecutor for the given ID.
+
+         Does nothing if the FunctionExecutor for the given ID doesn't exist or was already deleted.
+         """
+         async with self._executors_lock:
+             if self._executors[id] != function_executor:
+                 # Function Executor was already deleted or replaced and the caller is not aware of this.
+                 return
+             del self._executors[id]
+             await self._factory.destroy(executor=function_executor, logger=logger)
+
+     async def clear(self, logger):
+         async with self._executors_lock:
+             while self._executors:
+                 id, function_executor = self._executors.popitem()
+                 await self._factory.destroy(function_executor, logger)
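
A rough usage sketch of the map, assuming `factory` is a concrete FunctionExecutorFactory (for example the ProcessFunctionExecutorFactory added in this release); the ID key and request contents are illustrative, not taken from this diff:

import structlog

from indexify.executor.function_executor.function_executor_map import (
    FunctionExecutorMap,
)
from indexify.function_executor.proto.function_executor_pb2 import InitializeRequest

async def run_with_map(factory) -> None:
    logger = structlog.get_logger(module="example")
    executors = FunctionExecutorMap(factory)
    executor = await executors.get_or_create(
        id="namespace/graph/function",           # any stable key for the function; illustrative
        initialize_request=InitializeRequest(),  # real requests carry the function's details
        initial_state=None,
        logger=logger,
    )
    # ... run tasks against `executor` via its gRPC channel ...
    await executors.clear(logger)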

indexify/executor/function_executor/process_function_executor.py (new file)
@@ -0,0 +1,64 @@
+ import asyncio
+ from typing import Any, Optional
+
+ import grpc
+
+ from indexify.function_executor.proto.configuration import GRPC_CHANNEL_OPTIONS
+
+ from .function_executor import (
+     FUNCTION_EXECUTOR_READY_TIMEOUT_SEC,
+     FunctionExecutor,
+ )
+
+
+ class ProcessFunctionExecutor(FunctionExecutor):
+     """A FunctionExecutor that runs in a separate host process."""
+
+     def __init__(
+         self,
+         process: asyncio.subprocess.Process,
+         port: int,
+         address: str,
+         logger: Any,
+         state: Optional[Any] = None,
+     ):
+         self._proc = process
+         self._port = port
+         self._address = address
+         self._logger = logger.bind(module=__name__)
+         self._channel: Optional[grpc.aio.Channel] = None
+         self._state: Optional[Any] = state
+
+     async def channel(self) -> grpc.aio.Channel:
+         # Not thread safe but async safe because we don't await.
+         if self._channel is not None:
+             return self._channel
+
+         channel: Optional[grpc.aio.Channel] = None
+         try:
+             channel = grpc.aio.insecure_channel(
+                 self._address, options=GRPC_CHANNEL_OPTIONS
+             )
+             await asyncio.wait_for(
+                 channel.channel_ready(),
+                 timeout=FUNCTION_EXECUTOR_READY_TIMEOUT_SEC,
+             )
+             # Check if another channel was created by a concurrent coroutine.
+             # Not thread safe but async safe because we never overwrite non-None self._channel.
+             if self._channel is not None:
+                 # Don't close and overwrite existing channel because it might be used for RPCs already.
+                 await channel.close()
+                 return self._channel
+             else:
+                 self._channel = channel
+                 return channel
+         except Exception:
+             if channel is not None:
+                 await channel.close()
+             self._logger.error(
+                 f"failed to connect to the gRPC server at {self._address} within {FUNCTION_EXECUTOR_READY_TIMEOUT_SEC} seconds"
+             )
+             raise
+
+     def state(self) -> Optional[Any]:
+         return self._state
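
The channel() method above is a lazy, coroutine-safe single initialization: build the channel without holding a lock, then check whether another coroutine won the race and discard the duplicate. A gRPC-free sketch of the same pattern with illustrative names:

import asyncio
from typing import Optional

class LazyResource:
    """Lazily creates one shared resource; safe for coroutines on one event loop."""

    def __init__(self):
        self._conn: Optional[str] = None

    async def _open(self) -> str:
        await asyncio.sleep(0.01)  # stands in for channel_ready()
        return "connection"

    async def conn(self) -> str:
        if self._conn is not None:
            return self._conn
        conn = await self._open()
        # Another coroutine may have finished first while we awaited above.
        if self._conn is not None:
            return self._conn  # drop our copy and reuse the winner's resource
        self._conn = conn
        return conn

async def main():
    r = LazyResource()
    print(await asyncio.gather(r.conn(), r.conn()))  # ['connection', 'connection']

asyncio.run(main())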