indexify 0.3.16__py3-none-any.whl → 0.3.18__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- indexify/cli/cli.py +19 -2
- indexify/executor/executor.py +24 -9
- indexify/executor/executor_flavor.py +7 -0
- indexify/executor/function_executor/function_executor.py +5 -2
- indexify/executor/function_executor/health_checker.py +55 -13
- indexify/executor/grpc/channel_manager.py +160 -0
- indexify/executor/grpc/state_reconciler.py +14 -9
- indexify/executor/grpc/state_reporter.py +72 -14
- indexify/executor/metrics/task_runner.py +7 -0
- indexify/executor/task_fetcher.py +8 -3
- indexify/executor/task_reporter.py +17 -0
- indexify/executor/task_runner.py +4 -0
- indexify/proto/{task_scheduler.proto → executor_api.proto} +23 -6
- indexify/proto/executor_api_pb2.py +70 -0
- indexify/proto/{task_scheduler_pb2.pyi → executor_api_pb2.pyi} +44 -4
- indexify/proto/{task_scheduler_pb2_grpc.py → executor_api_pb2_grpc.py} +36 -26
- {indexify-0.3.16.dist-info → indexify-0.3.18.dist-info}/METADATA +1 -1
- {indexify-0.3.16.dist-info → indexify-0.3.18.dist-info}/RECORD +21 -20
- indexify/executor/grpc/channel_creator.py +0 -53
- indexify/proto/task_scheduler_pb2.py +0 -64
- /indexify/executor/grpc/metrics/{channel_creator.py → channel_manager.py} +0 -0
- {indexify-0.3.16.dist-info → indexify-0.3.18.dist-info}/WHEEL +0 -0
- {indexify-0.3.16.dist-info → indexify-0.3.18.dist-info}/entry_points.txt +0 -0
indexify/executor/grpc/state_reporter.py CHANGED
@@ -1,37 +1,44 @@
 import asyncio
+import hashlib
+from socket import gethostname
 from typing import Any, Dict, List, Optional
 
 import grpc
 
-from indexify.proto.
+from indexify.proto.executor_api_pb2 import (
     AllowedFunction,
+)
+from indexify.proto.executor_api_pb2 import ExecutorFlavor as ExecutorFlavorProto
+from indexify.proto.executor_api_pb2 import (
     ExecutorState,
     ExecutorStatus,
     FunctionExecutorDescription,
 )
-from indexify.proto.
+from indexify.proto.executor_api_pb2 import (
     FunctionExecutorState as FunctionExecutorStateProto,
 )
-from indexify.proto.
+from indexify.proto.executor_api_pb2 import (
     FunctionExecutorStatus as FunctionExecutorStatusProto,
 )
-from indexify.proto.
+from indexify.proto.executor_api_pb2 import (
     GPUModel,
     GPUResources,
     HostResources,
     ReportExecutorStateRequest,
 )
-from indexify.proto.
-
+from indexify.proto.executor_api_pb2_grpc import (
+    ExecutorAPIStub,
 )
 
 from ..api_objects import FunctionURI
+from ..executor_flavor import ExecutorFlavor
 from ..function_executor.function_executor_state import FunctionExecutorState
 from ..function_executor.function_executor_states_container import (
     FunctionExecutorStatesContainer,
 )
 from ..function_executor.function_executor_status import FunctionExecutorStatus
-from 
+from ..runtime_probes import RuntimeProbes
+from .channel_manager import ChannelManager
 from .metrics.state_reporter import (
     metric_state_report_errors,
     metric_state_report_latency,
@@ -47,24 +54,32 @@ class ExecutorStateReporter:
     def __init__(
         self,
         executor_id: str,
+        flavor: ExecutorFlavor,
+        version: str,
+        labels: Dict[str, str],
         development_mode: bool,
         function_allowlist: Optional[List[FunctionURI]],
         function_executor_states: FunctionExecutorStatesContainer,
-
+        channel_manager: ChannelManager,
         logger: Any,
     ):
         self._executor_id: str = executor_id
+        self._flavor: ExecutorFlavor = flavor
+        self._version: str = version
+        self._labels: Dict[str, str] = labels.copy()
         self._development_mode: bool = development_mode
+        self._hostname: str = gethostname()
         self._function_executor_states: FunctionExecutorStatesContainer = (
             function_executor_states
         )
-        self.
+        self._channel_manager = channel_manager
         self._logger: Any = logger.bind(module=__name__)
         self._is_shutdown: bool = False
         self._executor_status: ExecutorStatus = ExecutorStatus.EXECUTOR_STATUS_UNKNOWN
         self._allowed_functions: List[AllowedFunction] = _to_grpc_allowed_functions(
             function_allowlist
         )
+        self._labels.update(_label_values_to_strings(RuntimeProbes().probe().labels))
 
     def update_executor_status(self, value: ExecutorStatus):
         self._executor_status = value
@@ -75,12 +90,16 @@ class ExecutorStateReporter:
         Never raises any exceptions.
         """
         while not self._is_shutdown:
-            async with await self.
+            async with await self._channel_manager.get_channel() as server_channel:
                 server_channel: grpc.aio.Channel
-                stub = 
+                stub = ExecutorAPIStub(server_channel)
                 while not self._is_shutdown:
                     try:
-
+                        # The periodic state reports serve as channel health monitoring requests
+                        # (same as TCP keep-alive). Channel Manager returns the same healthy channel
+                        # for all RPCs that we do from Executor to Server. So all the RPCs benefit
+                        # from this channel health monitoring.
+                        await self.report_state(stub)
                         await asyncio.sleep(_REPORTING_INTERVAL_SEC)
                     except Exception as e:
                         self._logger.error(
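The comment in the hunk above describes the design: every Executor-to-Server RPC shares one channel, and the periodic state report doubles as the health check for that channel. A minimal sketch of that reuse pattern follows; it is not the package's ChannelManager from channel_manager.py (which is outside this hunk), and the `SharedChannelManager` name, constructor argument, and demo address are assumptions.

```python
import asyncio
from typing import Optional

import grpc


class SharedChannelManager:
    """Hypothetical sketch (not the package's ChannelManager): one cached channel for all RPCs."""

    def __init__(self, server_address: str):
        self._server_address = server_address  # assumed constructor argument
        self._lock = asyncio.Lock()
        self._channel: Optional[grpc.aio.Channel] = None

    async def get_channel(self) -> grpc.aio.Channel:
        # Hand every caller the same channel object. Because the periodic state
        # report RPC flows over it too, a broken connection is noticed quickly,
        # and all other RPCs benefit from that health signal.
        async with self._lock:
            if self._channel is None:
                self._channel = grpc.aio.insecure_channel(self._server_address)
            return self._channel


async def demo() -> None:
    manager = SharedChannelManager("localhost:8901")  # hypothetical address
    assert await manager.get_channel() is await manager.get_channel()


asyncio.run(demo())
```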
@@ -92,7 +111,11 @@ class ExecutorStateReporter:
 
         self._logger.info("State reporter shutdown")
 
-    async def 
+    async def report_state(self, stub: ExecutorAPIStub):
+        """Reports the current state to the server represented by the supplied stub.
+
+        Raises exceptions on failure.
+        """
         with (
             metric_state_report_errors.count_exceptions(),
             metric_state_report_latency.time(),
@@ -101,11 +124,16 @@ class ExecutorStateReporter:
             state = ExecutorState(
                 executor_id=self._executor_id,
                 development_mode=self._development_mode,
-
+                hostname=self._hostname,
+                flavor=_to_grpc_executor_flavor(self._flavor, self._logger),
+                version=self._version,
+                status=self._executor_status,
                 free_resources=await self._fetch_free_host_resources(),
                 allowed_functions=self._allowed_functions,
                 function_executor_states=await self._fetch_function_executor_states(),
+                labels=self._labels,
             )
+            state.state_hash = _state_hash(state)
 
             await stub.report_executor_state(
                 ReportExecutorStateRequest(executor_state=state),
@@ -197,3 +225,33 @@ def _to_grpc_function_executor_status(
     logger.error("Unexpected Function Executor status", status=status)
 
     return result
+
+
+_FLAVOR_MAPPING = {
+    ExecutorFlavor.OSS: ExecutorFlavorProto.EXECUTOR_FLAVOR_OSS,
+    ExecutorFlavor.PLATFORM: ExecutorFlavorProto.EXECUTOR_FLAVOR_PLATFORM,
+}
+
+
+def _to_grpc_executor_flavor(
+    flavor: ExecutorFlavor, logger: Any
+) -> ExecutorFlavorProto:
+    result: ExecutorFlavorProto = _FLAVOR_MAPPING.get(
+        flavor, ExecutorFlavorProto.EXECUTOR_FLAVOR_UNKNOWN
+    )
+
+    if result == ExecutorFlavorProto.EXECUTOR_FLAVOR_UNKNOWN:
+        logger.error("Unexpected Executor flavor", flavor=flavor)
+
+    return result
+
+
+def _label_values_to_strings(labels: Dict[str, Any]) -> Dict[str, str]:
+    return {k: str(v) for k, v in labels.items()}
+
+
+def _state_hash(state: ExecutorState) -> str:
+    serialized_state: bytes = state.SerializeToString(deterministic=True)
+    hasher = hashlib.sha256(usedforsecurity=False)
+    hasher.update(serialized_state)
+    return hasher.hexdigest()
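The new `_state_hash` helper works because deterministic protobuf serialization gives logically equal messages identical bytes, so equal Executor states always produce the same digest. A small illustration of that property, assuming the regenerated `executor_api_pb2` module shipped in this wheel; the field values are made up.

```python
import hashlib

from indexify.proto.executor_api_pb2 import ExecutorState


def state_hash(state: ExecutorState) -> str:
    # deterministic=True makes protobuf sort map entries (the labels field here),
    # so logically equal states always serialize to the same bytes.
    hasher = hashlib.sha256(usedforsecurity=False)
    hasher.update(state.SerializeToString(deterministic=True))
    return hasher.hexdigest()


a = ExecutorState(executor_id="ex-1", version="0.3.18", labels={"os": "linux", "zone": "b"})
b = ExecutorState(executor_id="ex-1", version="0.3.18", labels={"zone": "b", "os": "linux"})
assert state_hash(a) == state_hash(b)  # label insertion order does not change the hash
```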
indexify/executor/metrics/task_runner.py CHANGED
@@ -23,6 +23,13 @@ metric_tasks_blocked_by_policy: prometheus_client.Gauge = prometheus_client.Gauge(
     "tasks_blocked_by_policy",
     "Number of tasks that are ready for execution but are blocked according to the current policy (typically waiting for a free Function Executor)",
 )
+metric_tasks_blocked_by_policy_per_function_name: prometheus_client.Gauge = (
+    prometheus_client.Gauge(
+        "tasks_blocked_by_policy_per_function_name",
+        "Number of tasks that are ready for execution but are blocked according to the current policy (typically waiting for a free Function Executor)",
+        ["function_name"],
+    )
+)
 
 # Metrics for the stage when task is running.
 metric_task_runs: prometheus_client.Counter = prometheus_client.Counter(
indexify/executor/task_fetcher.py CHANGED
@@ -1,6 +1,7 @@
 import json
 import time
-from 
+from socket import gethostname
+from typing import AsyncGenerator, Dict, List, Optional
 
 import structlog
 from httpx_sse import aconnect_sse
@@ -22,6 +23,7 @@ class TaskFetcher:
         self,
         executor_id: str,
         executor_version: str,
+        labels: Dict[str, str],
         function_allowlist: Optional[List[FunctionURI]],
         protocol: str,
         indexify_server_addr: str,
@@ -33,12 +35,15 @@ class TaskFetcher:
         self._logger = structlog.get_logger(module=__name__)
 
         probe_info: ProbeInfo = RuntimeProbes().probe()
+        all_labels = probe_info.labels.copy()
+        all_labels.update(labels)
+
         self._executor_metadata: ExecutorMetadata = ExecutorMetadata(
             id=executor_id,
             executor_version=executor_version,
-            addr=
+            addr=gethostname(),
             function_allowlist=function_allowlist,
-            labels=
+            labels=all_labels,
         )
 
     async def run(self) -> AsyncGenerator[Task, None]:
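With the copy-then-update order above, an operator-supplied label wins over a probe label that uses the same key, because `dict.update` overwrites duplicates. A quick illustration; the label keys, values, and the CLI flag mentioned in the comments are hypothetical.

```python
probe_labels = {"os": "linux", "gpu": "none"}  # detected by RuntimeProbes (values made up)
operator_labels = {"gpu": "nvidia-a10", "zone": "us-east-1a"}  # supplied by the operator (hypothetical)

all_labels = probe_labels.copy()
all_labels.update(operator_labels)  # duplicate keys take the operator-supplied value
assert all_labels == {"os": "linux", "gpu": "nvidia-a10", "zone": "us-east-1a"}
```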
indexify/executor/task_reporter.py CHANGED
@@ -49,6 +49,7 @@ class TaskReporter:
     ):
         self._base_url = base_url
         self._executor_id = executor_id
+        self._is_shutdown = False
         # Use thread-safe sync client due to issues with async client.
         # Async client attempts to use connections it already closed.
         # See e.g. https://github.com/encode/httpx/issues/2337.
@@ -56,9 +57,25 @@ class TaskReporter:
         # results in not reusing established TCP connections to server.
         self._client = get_httpx_client(config_path, make_async=False)
 
+    async def shutdown(self):
+        """Shuts down the task reporter.
+
+        Task reporter stops reporting all task outcomes to the Server.
+        There are many task failures due to Executor shutdown. We give wrong
+        signals to Server if we report such failures.
+        """
+        self._is_shutdown = True
+
     async def report(self, output: TaskOutput, logger: Any):
         """Reports result of the supplied task."""
         logger = logger.bind(module=__name__)
+
+        if self._is_shutdown:
+            logger.warning(
+                "task reporter got shutdown, skipping task outcome reporting"
+            )
+            return
+
         task_result, output_files, output_summary = self._process_task_output(output)
         task_result_data = task_result.model_dump_json(exclude_none=True)
 
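The guard added above turns outcome reporting into a no-op once shutdown starts, so failures caused only by the Executor going away never reach the Server. A hedged sketch of the intended call order during shutdown; the surrounding executor code is not shown in this hunk, and `function_executors.shutdown()` is a placeholder for whatever teardown the Executor actually performs.

```python
# Hypothetical shutdown sequence; the real wiring lives in executor.py,
# which is not part of this hunk.
async def shutdown_executor(task_reporter, function_executors) -> None:
    # Stop outcome reporting first: tasks that fail only because the Executor
    # is shutting down should not reach the Server as real failures.
    await task_reporter.shutdown()
    # Tear everything else down afterwards; any report() calls made during
    # teardown log a warning and return without contacting the Server.
    await function_executors.shutdown()  # placeholder for the rest of the teardown
```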
indexify/executor/task_runner.py CHANGED
@@ -22,6 +22,7 @@ from .metrics.task_runner import (
     metric_task_run_platform_errors,
     metric_task_runs,
     metric_tasks_blocked_by_policy,
+    metric_tasks_blocked_by_policy_per_function_name,
     metric_tasks_running,
 )
 
@@ -55,6 +56,9 @@ class TaskRunner:
         with (
             metric_task_policy_errors.count_exceptions(),
             metric_tasks_blocked_by_policy.track_inprogress(),
+            metric_tasks_blocked_by_policy_per_function_name.labels(
+                function_name=task_input.task.compute_fn
+            ).track_inprogress(),
             metric_task_policy_latency.time(),
         ):
             metric_task_policy_runs.inc()
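The hunk above wraps the policy wait in `labels(...).track_inprogress()`, so the per-function gauge rises while a task is blocked and falls when it proceeds. A standalone prometheus_client sketch of that pattern; the metric name and function name below are made up so they do not clash with the real metric.

```python
import prometheus_client

# A standalone gauge with a made-up name, mirroring the real per-function metric.
example_blocked_per_function = prometheus_client.Gauge(
    "example_tasks_blocked_by_policy_per_function_name",
    "Tasks waiting for a free Function Executor, broken down by function name",
    ["function_name"],
)

# labels(...) selects the per-function child; track_inprogress() increments it on
# entry and decrements it on exit, so the gauge shows how many tasks wait right now.
with example_blocked_per_function.labels(function_name="resize_image").track_inprogress():
    pass  # wait for the policy to allow the task, then run it
```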
indexify/proto/{task_scheduler.proto → executor_api.proto} RENAMED
@@ -1,6 +1,8 @@
 syntax = "proto3";
 
-package
+// Rename with caution. The package name is part of gRPC service name.
+// Existing clients won't find the service if the package name changes.
+package executor_api_pb;
 
 // ===== ReportExecutorState RPC =====
 
@@ -81,15 +83,26 @@ enum ExecutorStatus {
   EXECUTOR_STATUS_STOPPED = 5;
 }
 
+enum ExecutorFlavor {
+  EXECUTOR_FLAVOR_UNKNOWN = 0;
+  EXECUTOR_FLAVOR_OSS = 1;
+  EXECUTOR_FLAVOR_PLATFORM = 2;
+}
+
 message ExecutorState {
   optional string executor_id = 1;
   optional bool development_mode = 2;
-  optional
+  optional string hostname = 3;
+  optional ExecutorFlavor flavor = 4;
+  optional string version = 5;
+  optional ExecutorStatus status = 6;
   // Free resources available at the Executor.
-  optional HostResources free_resources = 
+  optional HostResources free_resources = 7;
   // Empty allowed_functions list means that any function can run on the Executor.
-  repeated AllowedFunction allowed_functions = 
-  repeated FunctionExecutorState function_executor_states = 
+  repeated AllowedFunction allowed_functions = 8;
+  repeated FunctionExecutorState function_executor_states = 9;
+  map<string, string> labels = 10;
+  optional string state_hash = 11;
 }
 
 // A message sent by Executor to report its up to date state to Server.
@@ -136,7 +149,11 @@ message DesiredExecutorState {
 
 // Internal API for scheduling and running tasks on Executors. Executors are acting as clients of this API.
 // Server is responsible for scheduling tasks on Executors and Executors are responsible for running the tasks.
-
+//
+// Rename with caution. Existing clients won't find the service if the service name changes. A HTTP2 ingress proxy
+// might use the service name in it HTTP2 path based routing rules. See how gRPC uses service names in its HTTP2 paths
+// at https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md.
+service ExecutorAPI {
   // Called by Executor every 5 seconds to report that it's still alive and provide its current state.
   //
   // Missing 3 reports will result in the Executor being deregistered by Server.
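The "rename with caution" comments exist because the wire-level request path is derived from the proto package and service name: this definition yields "/executor_api_pb.ExecutorAPI/report_executor_state", and renaming either part changes the path, so older clients (and any proxy rules keyed on it) stop matching. A short sketch of a client hitting that path through the regenerated stub; the server address is a hypothetical placeholder.

```python
# gRPC builds the HTTP/2 path as "/<package>.<Service>/<method>", so this request
# goes to "/executor_api_pb.ExecutorAPI/report_executor_state".
import grpc

from indexify.proto.executor_api_pb2 import ExecutorState, ReportExecutorStateRequest
from indexify.proto.executor_api_pb2_grpc import ExecutorAPIStub

channel = grpc.insecure_channel("localhost:8901")  # hypothetical Server address
stub = ExecutorAPIStub(channel)
stub.report_executor_state(
    ReportExecutorStateRequest(executor_state=ExecutorState(executor_id="ex-1"))
)
```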
indexify/proto/executor_api_pb2.py ADDED
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
+# source: indexify/proto/executor_api.proto
+# Protobuf Python Version: 5.29.0
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+
+_runtime_version.ValidateProtobufRuntimeVersion(
+    _runtime_version.Domain.PUBLIC, 5, 29, 0, "", "indexify/proto/executor_api.proto"
+)
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n!indexify/proto/executor_api.proto\x12\x0f\x65xecutor_api_pb"e\n\x0cGPUResources\x12\x12\n\x05\x63ount\x18\x01 \x01(\rH\x00\x88\x01\x01\x12-\n\x05model\x18\x02 \x01(\x0e\x32\x19.executor_api_pb.GPUModelH\x01\x88\x01\x01\x42\x08\n\x06_countB\x08\n\x06_model"\xc2\x01\n\rHostResources\x12\x16\n\tcpu_count\x18\x01 \x01(\rH\x00\x88\x01\x01\x12\x19\n\x0cmemory_bytes\x18\x02 \x01(\x04H\x01\x88\x01\x01\x12\x17\n\ndisk_bytes\x18\x03 \x01(\x04H\x02\x88\x01\x01\x12/\n\x03gpu\x18\x04 \x01(\x0b\x32\x1d.executor_api_pb.GPUResourcesH\x03\x88\x01\x01\x42\x0c\n\n_cpu_countB\x0f\n\r_memory_bytesB\r\n\x0b_disk_bytesB\x06\n\x04_gpu"\xbb\x01\n\x0f\x41llowedFunction\x12\x16\n\tnamespace\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x17\n\ngraph_name\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x1a\n\rfunction_name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\rgraph_version\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\x0c\n\n_namespaceB\r\n\x0b_graph_nameB\x10\n\x0e_function_nameB\x10\n\x0e_graph_version"\xed\x02\n\x1b\x46unctionExecutorDescription\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x16\n\tnamespace\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x17\n\ngraph_name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\rgraph_version\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x1a\n\rfunction_name\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x16\n\timage_uri\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x14\n\x0csecret_names\x18\x07 \x03(\t\x12<\n\x0fresource_limits\x18\x08 \x01(\x0b\x32\x1e.executor_api_pb.HostResourcesH\x06\x88\x01\x01\x42\x05\n\x03_idB\x0c\n\n_namespaceB\r\n\x0b_graph_nameB\x10\n\x0e_graph_versionB\x10\n\x0e_function_nameB\x0c\n\n_image_uriB\x12\n\x10_resource_limits"\xb8\x01\n\x15\x46unctionExecutorState\x12\x46\n\x0b\x64\x65scription\x18\x01 \x01(\x0b\x32,.executor_api_pb.FunctionExecutorDescriptionH\x00\x88\x01\x01\x12<\n\x06status\x18\x02 \x01(\x0e\x32\'.executor_api_pb.FunctionExecutorStatusH\x01\x88\x01\x01\x42\x0e\n\x0c_descriptionB\t\n\x07_status"\x9f\x05\n\rExecutorState\x12\x18\n\x0b\x65xecutor_id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x1d\n\x10\x64\x65velopment_mode\x18\x02 \x01(\x08H\x01\x88\x01\x01\x12\x15\n\x08hostname\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x34\n\x06\x66lavor\x18\x04 \x01(\x0e\x32\x1f.executor_api_pb.ExecutorFlavorH\x03\x88\x01\x01\x12\x14\n\x07version\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x34\n\x06status\x18\x06 \x01(\x0e\x32\x1f.executor_api_pb.ExecutorStatusH\x05\x88\x01\x01\x12;\n\x0e\x66ree_resources\x18\x07 \x01(\x0b\x32\x1e.executor_api_pb.HostResourcesH\x06\x88\x01\x01\x12;\n\x11\x61llowed_functions\x18\x08 \x03(\x0b\x32 .executor_api_pb.AllowedFunction\x12H\n\x18\x66unction_executor_states\x18\t \x03(\x0b\x32&.executor_api_pb.FunctionExecutorState\x12:\n\x06labels\x18\n \x03(\x0b\x32*.executor_api_pb.ExecutorState.LabelsEntry\x12\x17\n\nstate_hash\x18\x0b \x01(\tH\x07\x88\x01\x01\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x0e\n\x0c_executor_idB\x13\n\x11_development_modeB\x0b\n\t_hostnameB\t\n\x07_flavorB\n\n\x08_versionB\t\n\x07_statusB\x11\n\x0f_free_resourcesB\r\n\x0b_state_hash"l\n\x1aReportExecutorStateRequest\x12;\n\x0e\x65xecutor_state\x18\x01 \x01(\x0b\x32\x1e.executor_api_pb.ExecutorStateH\x00\x88\x01\x01\x42\x11\n\x0f_executor_state"\x1d\n\x1bReportExecutorStateResponse"\x88\x03\n\x04Task\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x16\n\tnamespace\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x17\n\ngraph_name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\rgraph_version\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x1a\n\rfunction_name\x18\x05 
\x01(\tH\x04\x88\x01\x01\x12 \n\x13graph_invocation_id\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x16\n\tinput_key\x18\x08 \x01(\tH\x06\x88\x01\x01\x12\x1f\n\x12reducer_output_key\x18\t \x01(\tH\x07\x88\x01\x01\x12\x17\n\ntimeout_ms\x18\n \x01(\tH\x08\x88\x01\x01\x42\x05\n\x03_idB\x0c\n\n_namespaceB\r\n\x0b_graph_nameB\x10\n\x0e_graph_versionB\x10\n\x0e_function_nameB\x16\n\x14_graph_invocation_idB\x0c\n\n_input_keyB\x15\n\x13_reducer_output_keyB\r\n\x0b_timeout_ms"\x7f\n\x0eTaskAllocation\x12!\n\x14\x66unction_executor_id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12(\n\x04task\x18\x02 \x01(\x0b\x32\x15.executor_api_pb.TaskH\x01\x88\x01\x01\x42\x17\n\x15_function_executor_idB\x07\n\x05_task"K\n\x1fGetDesiredExecutorStatesRequest\x12\x18\n\x0b\x65xecutor_id\x18\x01 \x01(\tH\x00\x88\x01\x01\x42\x0e\n\x0c_executor_id"\xb9\x01\n\x14\x44\x65siredExecutorState\x12H\n\x12\x66unction_executors\x18\x01 \x03(\x0b\x32,.executor_api_pb.FunctionExecutorDescription\x12\x39\n\x10task_allocations\x18\x02 \x03(\x0b\x32\x1f.executor_api_pb.TaskAllocation\x12\x12\n\x05\x63lock\x18\x03 \x01(\x04H\x00\x88\x01\x01\x42\x08\n\x06_clock*\x86\x03\n\x08GPUModel\x12\x15\n\x11GPU_MODEL_UNKNOWN\x10\x00\x12"\n\x1eGPU_MODEL_NVIDIA_TESLA_T4_16GB\x10\n\x12$\n GPU_MODEL_NVIDIA_TESLA_V100_16GB\x10\x14\x12\x1d\n\x19GPU_MODEL_NVIDIA_A10_24GB\x10\x1e\x12\x1f\n\x1bGPU_MODEL_NVIDIA_A6000_48GB\x10(\x12#\n\x1fGPU_MODEL_NVIDIA_A100_SXM4_40GB\x10\x32\x12#\n\x1fGPU_MODEL_NVIDIA_A100_SXM4_80GB\x10\x33\x12"\n\x1eGPU_MODEL_NVIDIA_A100_PCI_40GB\x10\x34\x12#\n\x1fGPU_MODEL_NVIDIA_H100_SXM5_80GB\x10<\x12"\n\x1eGPU_MODEL_NVIDIA_H100_PCI_80GB\x10=\x12"\n\x1eGPU_MODEL_NVIDIA_RTX_6000_24GB\x10>*\xa3\x03\n\x16\x46unctionExecutorStatus\x12$\n FUNCTION_EXECUTOR_STATUS_UNKNOWN\x10\x00\x12(\n$FUNCTION_EXECUTOR_STATUS_STARTING_UP\x10\x01\x12:\n6FUNCTION_EXECUTOR_STATUS_STARTUP_FAILED_CUSTOMER_ERROR\x10\x02\x12:\n6FUNCTION_EXECUTOR_STATUS_STARTUP_FAILED_PLATFORM_ERROR\x10\x03\x12!\n\x1d\x46UNCTION_EXECUTOR_STATUS_IDLE\x10\x04\x12)\n%FUNCTION_EXECUTOR_STATUS_RUNNING_TASK\x10\x05\x12&\n"FUNCTION_EXECUTOR_STATUS_UNHEALTHY\x10\x06\x12%\n!FUNCTION_EXECUTOR_STATUS_STOPPING\x10\x07\x12$\n FUNCTION_EXECUTOR_STATUS_STOPPED\x10\x08*\xc3\x01\n\x0e\x45xecutorStatus\x12\x1b\n\x17\x45XECUTOR_STATUS_UNKNOWN\x10\x00\x12\x1f\n\x1b\x45XECUTOR_STATUS_STARTING_UP\x10\x01\x12\x1b\n\x17\x45XECUTOR_STATUS_RUNNING\x10\x02\x12\x1b\n\x17\x45XECUTOR_STATUS_DRAINED\x10\x03\x12\x1c\n\x18\x45XECUTOR_STATUS_STOPPING\x10\x04\x12\x1b\n\x17\x45XECUTOR_STATUS_STOPPED\x10\x05*d\n\x0e\x45xecutorFlavor\x12\x1b\n\x17\x45XECUTOR_FLAVOR_UNKNOWN\x10\x00\x12\x17\n\x13\x45XECUTOR_FLAVOR_OSS\x10\x01\x12\x1c\n\x18\x45XECUTOR_FLAVOR_PLATFORM\x10\x02\x32\xff\x01\n\x0b\x45xecutorAPI\x12t\n\x15report_executor_state\x12+.executor_api_pb.ReportExecutorStateRequest\x1a,.executor_api_pb.ReportExecutorStateResponse"\x00\x12z\n\x1bget_desired_executor_states\x12\x30.executor_api_pb.GetDesiredExecutorStatesRequest\x1a%.executor_api_pb.DesiredExecutorState"\x00\x30\x01\x62\x06proto3'
+)
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(
+    DESCRIPTOR, "indexify.proto.executor_api_pb2", _globals
+)
+if not _descriptor._USE_C_DESCRIPTORS:
+    DESCRIPTOR._loaded_options = None
+    _globals["_EXECUTORSTATE_LABELSENTRY"]._loaded_options = None
+    _globals["_EXECUTORSTATE_LABELSENTRY"]._serialized_options = b"8\001"
+    _globals["_GPUMODEL"]._serialized_start = 2704
+    _globals["_GPUMODEL"]._serialized_end = 3094
+    _globals["_FUNCTIONEXECUTORSTATUS"]._serialized_start = 3097
+    _globals["_FUNCTIONEXECUTORSTATUS"]._serialized_end = 3516
+    _globals["_EXECUTORSTATUS"]._serialized_start = 3519
+    _globals["_EXECUTORSTATUS"]._serialized_end = 3714
+    _globals["_EXECUTORFLAVOR"]._serialized_start = 3716
+    _globals["_EXECUTORFLAVOR"]._serialized_end = 3816
+    _globals["_GPURESOURCES"]._serialized_start = 54
+    _globals["_GPURESOURCES"]._serialized_end = 155
+    _globals["_HOSTRESOURCES"]._serialized_start = 158
+    _globals["_HOSTRESOURCES"]._serialized_end = 352
+    _globals["_ALLOWEDFUNCTION"]._serialized_start = 355
+    _globals["_ALLOWEDFUNCTION"]._serialized_end = 542
+    _globals["_FUNCTIONEXECUTORDESCRIPTION"]._serialized_start = 545
+    _globals["_FUNCTIONEXECUTORDESCRIPTION"]._serialized_end = 910
+    _globals["_FUNCTIONEXECUTORSTATE"]._serialized_start = 913
+    _globals["_FUNCTIONEXECUTORSTATE"]._serialized_end = 1097
+    _globals["_EXECUTORSTATE"]._serialized_start = 1100
+    _globals["_EXECUTORSTATE"]._serialized_end = 1771
+    _globals["_EXECUTORSTATE_LABELSENTRY"]._serialized_start = 1608
+    _globals["_EXECUTORSTATE_LABELSENTRY"]._serialized_end = 1653
+    _globals["_REPORTEXECUTORSTATEREQUEST"]._serialized_start = 1773
+    _globals["_REPORTEXECUTORSTATEREQUEST"]._serialized_end = 1881
+    _globals["_REPORTEXECUTORSTATERESPONSE"]._serialized_start = 1883
+    _globals["_REPORTEXECUTORSTATERESPONSE"]._serialized_end = 1912
+    _globals["_TASK"]._serialized_start = 1915
+    _globals["_TASK"]._serialized_end = 2307
+    _globals["_TASKALLOCATION"]._serialized_start = 2309
+    _globals["_TASKALLOCATION"]._serialized_end = 2436
+    _globals["_GETDESIREDEXECUTORSTATESREQUEST"]._serialized_start = 2438
+    _globals["_GETDESIREDEXECUTORSTATESREQUEST"]._serialized_end = 2513
+    _globals["_DESIREDEXECUTORSTATE"]._serialized_start = 2516
+    _globals["_DESIREDEXECUTORSTATE"]._serialized_end = 2701
+    _globals["_EXECUTORAPI"]._serialized_start = 3819
+    _globals["_EXECUTORAPI"]._serialized_end = 4074
+# @@protoc_insertion_point(module_scope)
indexify/proto/{task_scheduler_pb2.pyi → executor_api_pb2.pyi} RENAMED
@@ -50,6 +50,12 @@ class ExecutorStatus(int, metaclass=_enum_type_wrapper.EnumTypeWrapper):
     EXECUTOR_STATUS_STOPPING: _ClassVar[ExecutorStatus]
     EXECUTOR_STATUS_STOPPED: _ClassVar[ExecutorStatus]
 
+class ExecutorFlavor(int, metaclass=_enum_type_wrapper.EnumTypeWrapper):
+    __slots__ = ()
+    EXECUTOR_FLAVOR_UNKNOWN: _ClassVar[ExecutorFlavor]
+    EXECUTOR_FLAVOR_OSS: _ClassVar[ExecutorFlavor]
+    EXECUTOR_FLAVOR_PLATFORM: _ClassVar[ExecutorFlavor]
+
 GPU_MODEL_UNKNOWN: GPUModel
 GPU_MODEL_NVIDIA_TESLA_T4_16GB: GPUModel
 GPU_MODEL_NVIDIA_TESLA_V100_16GB: GPUModel
@@ -76,6 +82,9 @@ EXECUTOR_STATUS_RUNNING: ExecutorStatus
 EXECUTOR_STATUS_DRAINED: ExecutorStatus
 EXECUTOR_STATUS_STOPPING: ExecutorStatus
 EXECUTOR_STATUS_STOPPED: ExecutorStatus
+EXECUTOR_FLAVOR_UNKNOWN: ExecutorFlavor
+EXECUTOR_FLAVOR_OSS: ExecutorFlavor
+EXECUTOR_FLAVOR_PLATFORM: ExecutorFlavor
 
 class GPUResources(_message.Message):
     __slots__ = ("count", "model")
@@ -178,30 +187,59 @@ class ExecutorState(_message.Message):
     __slots__ = (
         "executor_id",
         "development_mode",
-        "
+        "hostname",
+        "flavor",
+        "version",
+        "status",
         "free_resources",
         "allowed_functions",
         "function_executor_states",
+        "labels",
+        "state_hash",
     )
+
+    class LabelsEntry(_message.Message):
+        __slots__ = ("key", "value")
+        KEY_FIELD_NUMBER: _ClassVar[int]
+        VALUE_FIELD_NUMBER: _ClassVar[int]
+        key: str
+        value: str
+        def __init__(
+            self, key: _Optional[str] = ..., value: _Optional[str] = ...
+        ) -> None: ...
+
     EXECUTOR_ID_FIELD_NUMBER: _ClassVar[int]
     DEVELOPMENT_MODE_FIELD_NUMBER: _ClassVar[int]
-
+    HOSTNAME_FIELD_NUMBER: _ClassVar[int]
+    FLAVOR_FIELD_NUMBER: _ClassVar[int]
+    VERSION_FIELD_NUMBER: _ClassVar[int]
+    STATUS_FIELD_NUMBER: _ClassVar[int]
     FREE_RESOURCES_FIELD_NUMBER: _ClassVar[int]
     ALLOWED_FUNCTIONS_FIELD_NUMBER: _ClassVar[int]
     FUNCTION_EXECUTOR_STATES_FIELD_NUMBER: _ClassVar[int]
+    LABELS_FIELD_NUMBER: _ClassVar[int]
+    STATE_HASH_FIELD_NUMBER: _ClassVar[int]
     executor_id: str
     development_mode: bool
-
+    hostname: str
+    flavor: ExecutorFlavor
+    version: str
+    status: ExecutorStatus
     free_resources: HostResources
     allowed_functions: _containers.RepeatedCompositeFieldContainer[AllowedFunction]
     function_executor_states: _containers.RepeatedCompositeFieldContainer[
         FunctionExecutorState
     ]
+    labels: _containers.ScalarMap[str, str]
+    state_hash: str
    def __init__(
        self,
        executor_id: _Optional[str] = ...,
        development_mode: bool = ...,
-
+        hostname: _Optional[str] = ...,
+        flavor: _Optional[_Union[ExecutorFlavor, str]] = ...,
+        version: _Optional[str] = ...,
+        status: _Optional[_Union[ExecutorStatus, str]] = ...,
        free_resources: _Optional[_Union[HostResources, _Mapping]] = ...,
        allowed_functions: _Optional[
            _Iterable[_Union[AllowedFunction, _Mapping]]
@@ -209,6 +247,8 @@ class ExecutorState(_message.Message):
         function_executor_states: _Optional[
             _Iterable[_Union[FunctionExecutorState, _Mapping]]
         ] = ...,
+        labels: _Optional[_Mapping[str, str]] = ...,
+        state_hash: _Optional[str] = ...,
     ) -> None: ...
 
 class ReportExecutorStateRequest(_message.Message):
indexify/proto/{task_scheduler_pb2_grpc.py → executor_api_pb2_grpc.py} RENAMED
@@ -4,9 +4,7 @@ import warnings
 
 import grpc
 
-from indexify.proto import 
-    task_scheduler_pb2 as indexify_dot_proto_dot_task__scheduler__pb2,
-)
+from indexify.proto import executor_api_pb2 as indexify_dot_proto_dot_executor__api__pb2
 
 GRPC_GENERATED_VERSION = "1.70.0"
 GRPC_VERSION = grpc.__version__
@@ -24,16 +22,20 @@ except ImportError:
 if _version_not_supported:
     raise RuntimeError(
         f"The grpc package installed is at version {GRPC_VERSION},"
-        + f" but the generated code in indexify/proto/
+        + f" but the generated code in indexify/proto/executor_api_pb2_grpc.py depends on"
         + f" grpcio>={GRPC_GENERATED_VERSION}."
         + f" Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}"
         + f" or downgrade your generated code using grpcio-tools<={GRPC_VERSION}."
     )
 
 
-class 
+class ExecutorAPIStub(object):
     """Internal API for scheduling and running tasks on Executors. Executors are acting as clients of this API.
     Server is responsible for scheduling tasks on Executors and Executors are responsible for running the tasks.
+
+    Rename with caution. Existing clients won't find the service if the service name changes. A HTTP2 ingress proxy
+    might use the service name in it HTTP2 path based routing rules. See how gRPC uses service names in its HTTP2 paths
+    at https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md.
     """
 
     def __init__(self, channel):
@@ -43,22 +45,26 @@ class TaskSchedulerServiceStub(object):
             channel: A grpc.Channel.
         """
         self.report_executor_state = channel.unary_unary(
-            "/
-            request_serializer=
-            response_deserializer=
+            "/executor_api_pb.ExecutorAPI/report_executor_state",
+            request_serializer=indexify_dot_proto_dot_executor__api__pb2.ReportExecutorStateRequest.SerializeToString,
+            response_deserializer=indexify_dot_proto_dot_executor__api__pb2.ReportExecutorStateResponse.FromString,
             _registered_method=True,
         )
         self.get_desired_executor_states = channel.unary_stream(
-            "/
-            request_serializer=
-            response_deserializer=
+            "/executor_api_pb.ExecutorAPI/get_desired_executor_states",
+            request_serializer=indexify_dot_proto_dot_executor__api__pb2.GetDesiredExecutorStatesRequest.SerializeToString,
+            response_deserializer=indexify_dot_proto_dot_executor__api__pb2.DesiredExecutorState.FromString,
             _registered_method=True,
         )
 
 
-class 
+class ExecutorAPIServicer(object):
     """Internal API for scheduling and running tasks on Executors. Executors are acting as clients of this API.
     Server is responsible for scheduling tasks on Executors and Executors are responsible for running the tasks.
+
+    Rename with caution. Existing clients won't find the service if the service name changes. A HTTP2 ingress proxy
+    might use the service name in it HTTP2 path based routing rules. See how gRPC uses service names in its HTTP2 paths
+    at https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md.
     """
 
     def report_executor_state(self, request, context):
@@ -81,32 +87,36 @@ class TaskSchedulerServiceServicer(object):
         raise NotImplementedError("Method not implemented!")
 
 
-def 
+def add_ExecutorAPIServicer_to_server(servicer, server):
     rpc_method_handlers = {
         "report_executor_state": grpc.unary_unary_rpc_method_handler(
             servicer.report_executor_state,
-            request_deserializer=
-            response_serializer=
+            request_deserializer=indexify_dot_proto_dot_executor__api__pb2.ReportExecutorStateRequest.FromString,
+            response_serializer=indexify_dot_proto_dot_executor__api__pb2.ReportExecutorStateResponse.SerializeToString,
         ),
         "get_desired_executor_states": grpc.unary_stream_rpc_method_handler(
             servicer.get_desired_executor_states,
-            request_deserializer=
-            response_serializer=
+            request_deserializer=indexify_dot_proto_dot_executor__api__pb2.GetDesiredExecutorStatesRequest.FromString,
+            response_serializer=indexify_dot_proto_dot_executor__api__pb2.DesiredExecutorState.SerializeToString,
         ),
     }
     generic_handler = grpc.method_handlers_generic_handler(
-        "
+        "executor_api_pb.ExecutorAPI", rpc_method_handlers
     )
     server.add_generic_rpc_handlers((generic_handler,))
     server.add_registered_method_handlers(
-        "
+        "executor_api_pb.ExecutorAPI", rpc_method_handlers
     )
 
 
 # This class is part of an EXPERIMENTAL API.
-class 
+class ExecutorAPI(object):
     """Internal API for scheduling and running tasks on Executors. Executors are acting as clients of this API.
     Server is responsible for scheduling tasks on Executors and Executors are responsible for running the tasks.
+
+    Rename with caution. Existing clients won't find the service if the service name changes. A HTTP2 ingress proxy
+    might use the service name in it HTTP2 path based routing rules. See how gRPC uses service names in its HTTP2 paths
+    at https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md.
     """
 
     @staticmethod
@@ -125,9 +135,9 @@ class TaskSchedulerService(object):
         return grpc.experimental.unary_unary(
             request,
             target,
-            "/
-
-
+            "/executor_api_pb.ExecutorAPI/report_executor_state",
+            indexify_dot_proto_dot_executor__api__pb2.ReportExecutorStateRequest.SerializeToString,
+            indexify_dot_proto_dot_executor__api__pb2.ReportExecutorStateResponse.FromString,
             options,
             channel_credentials,
             insecure,
@@ -155,9 +165,9 @@ class TaskSchedulerService(object):
         return grpc.experimental.unary_stream(
             request,
             target,
-            "/
-
-
+            "/executor_api_pb.ExecutorAPI/get_desired_executor_states",
+            indexify_dot_proto_dot_executor__api__pb2.GetDesiredExecutorStatesRequest.SerializeToString,
+            indexify_dot_proto_dot_executor__api__pb2.DesiredExecutorState.FromString,
             options,
             channel_credentials,
             insecure,
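For completeness, here is a minimal sketch of how the regenerated service classes above would be wired into a server. It mirrors standard grpcio usage only; the indexify Server itself lives outside this package, and the servicer class, its behavior, and the port are assumptions for illustration.

```python
from concurrent import futures

import grpc

from indexify.proto.executor_api_pb2 import ReportExecutorStateResponse
from indexify.proto.executor_api_pb2_grpc import (
    ExecutorAPIServicer,
    add_ExecutorAPIServicer_to_server,
)


class InMemoryExecutorAPI(ExecutorAPIServicer):
    """Hypothetical servicer: acknowledge reports and remember the latest state per Executor."""

    def __init__(self):
        self.last_state_by_executor = {}

    def report_executor_state(self, request, context):
        state = request.executor_state
        self.last_state_by_executor[state.executor_id] = state
        return ReportExecutorStateResponse()


server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
add_ExecutorAPIServicer_to_server(InMemoryExecutorAPI(), server)
server.add_insecure_port("[::]:8901")  # hypothetical port
server.start()
```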