lumen-app 0.4.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lumen_app/__init__.py +7 -0
- lumen_app/core/__init__.py +0 -0
- lumen_app/core/config.py +661 -0
- lumen_app/core/installer.py +274 -0
- lumen_app/core/loader.py +45 -0
- lumen_app/core/router.py +87 -0
- lumen_app/core/server.py +389 -0
- lumen_app/core/service.py +49 -0
- lumen_app/core/tests/__init__.py +1 -0
- lumen_app/core/tests/test_core_integration.py +561 -0
- lumen_app/core/tests/test_env_checker.py +487 -0
- lumen_app/proto/README.md +12 -0
- lumen_app/proto/ml_service.proto +88 -0
- lumen_app/proto/ml_service_pb2.py +66 -0
- lumen_app/proto/ml_service_pb2.pyi +136 -0
- lumen_app/proto/ml_service_pb2_grpc.py +251 -0
- lumen_app/server.py +362 -0
- lumen_app/utils/env_checker.py +752 -0
- lumen_app/utils/installation/__init__.py +25 -0
- lumen_app/utils/installation/env_manager.py +152 -0
- lumen_app/utils/installation/micromamba_installer.py +459 -0
- lumen_app/utils/installation/package_installer.py +149 -0
- lumen_app/utils/installation/verifier.py +95 -0
- lumen_app/utils/logger.py +181 -0
- lumen_app/utils/mamba/cuda.yaml +12 -0
- lumen_app/utils/mamba/default.yaml +6 -0
- lumen_app/utils/mamba/openvino.yaml +7 -0
- lumen_app/utils/mamba/tensorrt.yaml +13 -0
- lumen_app/utils/package_resolver.py +309 -0
- lumen_app/utils/preset_registry.py +219 -0
- lumen_app/web/__init__.py +3 -0
- lumen_app/web/api/__init__.py +1 -0
- lumen_app/web/api/config.py +229 -0
- lumen_app/web/api/hardware.py +201 -0
- lumen_app/web/api/install.py +608 -0
- lumen_app/web/api/server.py +253 -0
- lumen_app/web/core/__init__.py +1 -0
- lumen_app/web/core/server_manager.py +348 -0
- lumen_app/web/core/state.py +264 -0
- lumen_app/web/main.py +145 -0
- lumen_app/web/models/__init__.py +28 -0
- lumen_app/web/models/config.py +63 -0
- lumen_app/web/models/hardware.py +64 -0
- lumen_app/web/models/install.py +134 -0
- lumen_app/web/models/server.py +95 -0
- lumen_app/web/static/assets/index-CGuhGHC9.css +1 -0
- lumen_app/web/static/assets/index-DN6HmxWS.js +56 -0
- lumen_app/web/static/index.html +14 -0
- lumen_app/web/static/vite.svg +1 -0
- lumen_app/web/websockets/__init__.py +1 -0
- lumen_app/web/websockets/logs.py +159 -0
- lumen_app-0.4.2.dist-info/METADATA +23 -0
- lumen_app-0.4.2.dist-info/RECORD +56 -0
- lumen_app-0.4.2.dist-info/WHEEL +5 -0
- lumen_app-0.4.2.dist-info/entry_points.txt +3 -0
- lumen_app-0.4.2.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,136 @@
|
|
|
1
|
+
from google.protobuf import empty_pb2 as _empty_pb2
|
|
2
|
+
from google.protobuf.internal import containers as _containers
|
|
3
|
+
from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper
|
|
4
|
+
from google.protobuf import descriptor as _descriptor
|
|
5
|
+
from google.protobuf import message as _message
|
|
6
|
+
from collections.abc import Iterable as _Iterable, Mapping as _Mapping
|
|
7
|
+
from typing import ClassVar as _ClassVar, Optional as _Optional, Union as _Union
|
|
8
|
+
|
|
9
|
+
DESCRIPTOR: _descriptor.FileDescriptor
|
|
10
|
+
|
|
11
|
+
# Generated stub for the `ErrorCode` proto enum (see ml_service.proto).
# Protobuf enums are exposed as int subclasses via EnumTypeWrapper.
class ErrorCode(int, metaclass=_enum_type_wrapper.EnumTypeWrapper):
    __slots__ = ()
    # Enum members as they appear on the class.
    ERROR_CODE_UNSPECIFIED: _ClassVar[ErrorCode]
    ERROR_CODE_INVALID_ARGUMENT: _ClassVar[ErrorCode]
    ERROR_CODE_UNAVAILABLE: _ClassVar[ErrorCode]
    ERROR_CODE_DEADLINE_EXCEEDED: _ClassVar[ErrorCode]
    ERROR_CODE_INTERNAL: _ClassVar[ErrorCode]

# Module-level aliases for the enum members, mirroring the generated
# _pb2 module which re-exports each value at module scope.
ERROR_CODE_UNSPECIFIED: ErrorCode
ERROR_CODE_INVALID_ARGUMENT: ErrorCode
ERROR_CODE_UNAVAILABLE: ErrorCode
ERROR_CODE_DEADLINE_EXCEEDED: ErrorCode
ERROR_CODE_INTERNAL: ErrorCode
+
|
|
24
|
+
# Generated stub for the `Error` message: a structured error carried in
# InferResponse (code + human-readable message + detail).
class Error(_message.Message):
    __slots__ = ("code", "message", "detail")
    # Proto field-number constants exposed by the generated message class.
    CODE_FIELD_NUMBER: _ClassVar[int]
    MESSAGE_FIELD_NUMBER: _ClassVar[int]
    DETAIL_FIELD_NUMBER: _ClassVar[int]
    code: ErrorCode
    message: str
    detail: str
    def __init__(self, code: _Optional[_Union[ErrorCode, str]] = ..., message: _Optional[str] = ..., detail: _Optional[str] = ...) -> None: ...
|
|
33
|
+
|
|
34
|
+
# Generated stub for the `IOTask` message: describes one task a service
# supports — its name, accepted input/output MIME types, and string limits.
class IOTask(_message.Message):
    __slots__ = ("name", "input_mimes", "output_mimes", "limits")
    # Synthetic map-entry message generated for the `limits` map<string, string> field.
    class LimitsEntry(_message.Message):
        __slots__ = ("key", "value")
        KEY_FIELD_NUMBER: _ClassVar[int]
        VALUE_FIELD_NUMBER: _ClassVar[int]
        key: str
        value: str
        def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ...
    # Proto field-number constants.
    NAME_FIELD_NUMBER: _ClassVar[int]
    INPUT_MIMES_FIELD_NUMBER: _ClassVar[int]
    OUTPUT_MIMES_FIELD_NUMBER: _ClassVar[int]
    LIMITS_FIELD_NUMBER: _ClassVar[int]
    name: str
    input_mimes: _containers.RepeatedScalarFieldContainer[str]
    output_mimes: _containers.RepeatedScalarFieldContainer[str]
    limits: _containers.ScalarMap[str, str]
    def __init__(self, name: _Optional[str] = ..., input_mimes: _Optional[_Iterable[str]] = ..., output_mimes: _Optional[_Iterable[str]] = ..., limits: _Optional[_Mapping[str, str]] = ...) -> None: ...
|
|
52
|
+
|
|
53
|
+
# Generated stub for the `Capability` message: a service's self-description
# (models, runtime, concurrency, supported tasks, protocol version),
# returned by GetCapabilities / StreamCapabilities.
class Capability(_message.Message):
    __slots__ = ("service_name", "model_ids", "runtime", "max_concurrency", "precisions", "extra", "tasks", "protocol_version")
    # Synthetic map-entry message generated for the `extra` map<string, string> field.
    class ExtraEntry(_message.Message):
        __slots__ = ("key", "value")
        KEY_FIELD_NUMBER: _ClassVar[int]
        VALUE_FIELD_NUMBER: _ClassVar[int]
        key: str
        value: str
        def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ...
    # Proto field-number constants.
    SERVICE_NAME_FIELD_NUMBER: _ClassVar[int]
    MODEL_IDS_FIELD_NUMBER: _ClassVar[int]
    RUNTIME_FIELD_NUMBER: _ClassVar[int]
    MAX_CONCURRENCY_FIELD_NUMBER: _ClassVar[int]
    PRECISIONS_FIELD_NUMBER: _ClassVar[int]
    EXTRA_FIELD_NUMBER: _ClassVar[int]
    TASKS_FIELD_NUMBER: _ClassVar[int]
    PROTOCOL_VERSION_FIELD_NUMBER: _ClassVar[int]
    service_name: str
    model_ids: _containers.RepeatedScalarFieldContainer[str]
    runtime: str
    max_concurrency: int
    precisions: _containers.RepeatedScalarFieldContainer[str]
    extra: _containers.ScalarMap[str, str]
    # Repeated message field: one IOTask per supported task.
    tasks: _containers.RepeatedCompositeFieldContainer[IOTask]
    protocol_version: str
    def __init__(self, service_name: _Optional[str] = ..., model_ids: _Optional[_Iterable[str]] = ..., runtime: _Optional[str] = ..., max_concurrency: _Optional[int] = ..., precisions: _Optional[_Iterable[str]] = ..., extra: _Optional[_Mapping[str, str]] = ..., tasks: _Optional[_Iterable[_Union[IOTask, _Mapping]]] = ..., protocol_version: _Optional[str] = ...) -> None: ...
|
|
79
|
+
|
|
80
|
+
# Generated stub for the `InferRequest` message: one chunk of a
# client->server inference stream. seq/total/offset support chunked
# payloads; meta carries per-request string metadata.
class InferRequest(_message.Message):
    __slots__ = ("correlation_id", "task", "payload", "meta", "payload_mime", "seq", "total", "offset")
    # Synthetic map-entry message generated for the `meta` map<string, string> field.
    class MetaEntry(_message.Message):
        __slots__ = ("key", "value")
        KEY_FIELD_NUMBER: _ClassVar[int]
        VALUE_FIELD_NUMBER: _ClassVar[int]
        key: str
        value: str
        def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ...
    # Proto field-number constants.
    CORRELATION_ID_FIELD_NUMBER: _ClassVar[int]
    TASK_FIELD_NUMBER: _ClassVar[int]
    PAYLOAD_FIELD_NUMBER: _ClassVar[int]
    META_FIELD_NUMBER: _ClassVar[int]
    PAYLOAD_MIME_FIELD_NUMBER: _ClassVar[int]
    SEQ_FIELD_NUMBER: _ClassVar[int]
    TOTAL_FIELD_NUMBER: _ClassVar[int]
    OFFSET_FIELD_NUMBER: _ClassVar[int]
    correlation_id: str
    task: str
    payload: bytes
    meta: _containers.ScalarMap[str, str]
    payload_mime: str
    seq: int
    total: int
    offset: int
    def __init__(self, correlation_id: _Optional[str] = ..., task: _Optional[str] = ..., payload: _Optional[bytes] = ..., meta: _Optional[_Mapping[str, str]] = ..., payload_mime: _Optional[str] = ..., seq: _Optional[int] = ..., total: _Optional[int] = ..., offset: _Optional[int] = ...) -> None: ...
|
|
106
|
+
|
|
107
|
+
# Generated stub for the `InferResponse` message: one chunk of a
# server->client result stream. is_final marks the last chunk; error is
# set instead of result on failure; seq/total/offset mirror the request's
# chunking fields.
class InferResponse(_message.Message):
    __slots__ = ("correlation_id", "is_final", "result", "meta", "error", "seq", "total", "offset", "result_mime", "result_schema")
    # Synthetic map-entry message generated for the `meta` map<string, string> field.
    class MetaEntry(_message.Message):
        __slots__ = ("key", "value")
        KEY_FIELD_NUMBER: _ClassVar[int]
        VALUE_FIELD_NUMBER: _ClassVar[int]
        key: str
        value: str
        def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ...
    # Proto field-number constants.
    CORRELATION_ID_FIELD_NUMBER: _ClassVar[int]
    IS_FINAL_FIELD_NUMBER: _ClassVar[int]
    RESULT_FIELD_NUMBER: _ClassVar[int]
    META_FIELD_NUMBER: _ClassVar[int]
    ERROR_FIELD_NUMBER: _ClassVar[int]
    SEQ_FIELD_NUMBER: _ClassVar[int]
    TOTAL_FIELD_NUMBER: _ClassVar[int]
    OFFSET_FIELD_NUMBER: _ClassVar[int]
    RESULT_MIME_FIELD_NUMBER: _ClassVar[int]
    RESULT_SCHEMA_FIELD_NUMBER: _ClassVar[int]
    correlation_id: str
    is_final: bool
    result: bytes
    meta: _containers.ScalarMap[str, str]
    error: Error
    seq: int
    total: int
    offset: int
    result_mime: str
    result_schema: str
    def __init__(self, correlation_id: _Optional[str] = ..., is_final: bool = ..., result: _Optional[bytes] = ..., meta: _Optional[_Mapping[str, str]] = ..., error: _Optional[_Union[Error, _Mapping]] = ..., seq: _Optional[int] = ..., total: _Optional[int] = ..., offset: _Optional[int] = ..., result_mime: _Optional[str] = ..., result_schema: _Optional[str] = ...) -> None: ...
|
|
@@ -0,0 +1,251 @@
|
|
|
1
|
+
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
|
|
2
|
+
"""Client and server classes corresponding to protobuf-defined services."""
|
|
3
|
+
|
|
4
|
+
import warnings
|
|
5
|
+
|
|
6
|
+
import grpc
|
|
7
|
+
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
|
|
8
|
+
|
|
9
|
+
from lumen_app.proto import ml_service_pb2 as proto_dot_ml__service__pb2
|
|
10
|
+
|
|
11
|
+
# Version guard emitted by grpcio-tools: this module was generated against
# grpcio 1.76.0 and refuses to import under an older runtime, because the
# `_registered_method=True` machinery used below requires it.
GRPC_GENERATED_VERSION = "1.76.0"
GRPC_VERSION = grpc.__version__
_version_not_supported = False

try:
    from grpc._utilities import first_version_is_lower

    _version_not_supported = first_version_is_lower(
        GRPC_VERSION, GRPC_GENERATED_VERSION
    )
except ImportError:
    # first_version_is_lower is absent on grpc versions too old to compare
    # against; treat that as unsupported.
    _version_not_supported = True

if _version_not_supported:
    raise RuntimeError(
        f"The grpc package installed is at version {GRPC_VERSION},"
        + " but the generated code in proto/ml_service_pb2_grpc.py depends on"
        + f" grpcio>={GRPC_GENERATED_VERSION}."
        + f" Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}"
        + f" or downgrade your generated code using grpcio-tools<={GRPC_VERSION}."
    )
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
# Generated client stub for the home_native.v1.Inference service. Each
# attribute is a pre-bound multicallable for one RPC method.
class InferenceStub(object):
    """---- Service contract ----"""

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Bidirectional streaming RPC: chunked requests in, chunked results out.
        self.Infer = channel.stream_stream(
            "/home_native.v1.Inference/Infer",
            request_serializer=proto_dot_ml__service__pb2.InferRequest.SerializeToString,
            response_deserializer=proto_dot_ml__service__pb2.InferResponse.FromString,
            _registered_method=True,
        )
        # Unary RPC returning a single Capability (legacy/back-compat path).
        self.GetCapabilities = channel.unary_unary(
            "/home_native.v1.Inference/GetCapabilities",
            request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
            response_deserializer=proto_dot_ml__service__pb2.Capability.FromString,
            _registered_method=True,
        )
        # Server-streaming RPC returning all capabilities.
        self.StreamCapabilities = channel.unary_stream(
            "/home_native.v1.Inference/StreamCapabilities",
            request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
            response_deserializer=proto_dot_ml__service__pb2.Capability.FromString,
            _registered_method=True,
        )
        # Empty-in / Empty-out health probe.
        self.Health = channel.unary_unary(
            "/home_native.v1.Inference/Health",
            request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            _registered_method=True,
        )
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
# Generated server-side base class: subclass and override each method to
# implement the service. Every default raises UNIMPLEMENTED.
class InferenceServicer(object):
    """---- Service contract ----"""

    def Infer(self, request_iterator, context):
        """Bidirectional stream: client sends chunks; server returns incremental/final results; ordered but non-blocking"""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def GetCapabilities(self, request, context):
        """Capability declaration (backward compatibility: single capability; use StreamCapabilities for multiple)"""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def StreamCapabilities(self, request, context):
        """Recommended: server stream returns all capabilities (call at startup or after hot-reload)"""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def Health(self, request, context):
        """Health probe"""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
def add_InferenceServicer_to_server(servicer, server):
    """Register an InferenceServicer implementation with a grpc.Server.

    Builds one RPC method handler per service method (matching each
    method's streaming arity and the generated (de)serializers), then
    registers them both as a generic handler and — for newer grpcio —
    as registered-method handlers for the faster dispatch path.
    """
    rpc_method_handlers = {
        "Infer": grpc.stream_stream_rpc_method_handler(
            servicer.Infer,
            request_deserializer=proto_dot_ml__service__pb2.InferRequest.FromString,
            response_serializer=proto_dot_ml__service__pb2.InferResponse.SerializeToString,
        ),
        "GetCapabilities": grpc.unary_unary_rpc_method_handler(
            servicer.GetCapabilities,
            request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            response_serializer=proto_dot_ml__service__pb2.Capability.SerializeToString,
        ),
        "StreamCapabilities": grpc.unary_stream_rpc_method_handler(
            servicer.StreamCapabilities,
            request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            response_serializer=proto_dot_ml__service__pb2.Capability.SerializeToString,
        ),
        "Health": grpc.unary_unary_rpc_method_handler(
            servicer.Health,
            request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        "home_native.v1.Inference", rpc_method_handlers
    )
    server.add_generic_rpc_handlers((generic_handler,))
    server.add_registered_method_handlers(
        "home_native.v1.Inference", rpc_method_handlers
    )
|
|
127
|
+
|
|
128
|
+
|
|
129
|
+
# This class is part of an EXPERIMENTAL API.
# Channel-less convenience API: each static method opens/uses a channel to
# `target` via grpc.experimental and issues a single call. Prefer
# InferenceStub for long-lived clients.
class Inference(object):
    """---- Service contract ----"""

    # Bidirectional-streaming Infer call against `target`.
    @staticmethod
    def Infer(
        request_iterator,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        insecure=False,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return grpc.experimental.stream_stream(
            request_iterator,
            target,
            "/home_native.v1.Inference/Infer",
            proto_dot_ml__service__pb2.InferRequest.SerializeToString,
            proto_dot_ml__service__pb2.InferResponse.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
            _registered_method=True,
        )

    # Unary GetCapabilities call (single Capability; back-compat path).
    @staticmethod
    def GetCapabilities(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        insecure=False,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return grpc.experimental.unary_unary(
            request,
            target,
            "/home_native.v1.Inference/GetCapabilities",
            google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
            proto_dot_ml__service__pb2.Capability.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
            _registered_method=True,
        )

    # Server-streaming StreamCapabilities call (all capabilities).
    @staticmethod
    def StreamCapabilities(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        insecure=False,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return grpc.experimental.unary_stream(
            request,
            target,
            "/home_native.v1.Inference/StreamCapabilities",
            google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
            proto_dot_ml__service__pb2.Capability.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
            _registered_method=True,
        )

    # Unary Health probe (Empty in, Empty out).
    @staticmethod
    def Health(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        insecure=False,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return grpc.experimental.unary_unary(
            request,
            target,
            "/home_native.v1.Inference/Health",
            google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
            _registered_method=True,
        )
|