clarifai-11.1.5rc8-py3-none-any.whl → clarifai-11.1.6rc1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clarifai/__init__.py +1 -1
- clarifai/cli/__pycache__/__main__.cpython-310.pyc +0 -0
- clarifai/cli/__pycache__/model.cpython-310.pyc +0 -0
- clarifai/cli/model.py +40 -50
- clarifai/client/model.py +393 -157
- clarifai/runners/__init__.py +7 -2
- clarifai/runners/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/runners/dockerfile_template/Dockerfile.template +1 -4
- clarifai/runners/models/__pycache__/base_typed_model.cpython-310.pyc +0 -0
- clarifai/runners/models/__pycache__/model_builder.cpython-310.pyc +0 -0
- clarifai/runners/models/__pycache__/model_class.cpython-310.pyc +0 -0
- clarifai/runners/models/__pycache__/model_runner.cpython-310.pyc +0 -0
- clarifai/runners/models/base_typed_model.py +238 -0
- clarifai/runners/models/model_builder.py +9 -26
- clarifai/runners/models/model_class.py +28 -256
- clarifai/runners/models/model_run_locally.py +78 -3
- clarifai/runners/models/model_runner.py +0 -2
- clarifai/runners/models/model_servicer.py +2 -11
- clarifai/runners/utils/__pycache__/data_handler.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/data_types.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/method_signatures.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/serializers.cpython-310.pyc +0 -0
- clarifai/runners/utils/data_handler.py +205 -308
- {clarifai-11.1.5rc8.dist-info → clarifai-11.1.6rc1.dist-info}/METADATA +1 -2
- {clarifai-11.1.5rc8.dist-info → clarifai-11.1.6rc1.dist-info}/RECORD +29 -28
- {clarifai-11.1.5rc8.dist-info → clarifai-11.1.6rc1.dist-info}/LICENSE +0 -0
- {clarifai-11.1.5rc8.dist-info → clarifai-11.1.6rc1.dist-info}/WHEEL +0 -0
- {clarifai-11.1.5rc8.dist-info → clarifai-11.1.6rc1.dist-info}/entry_points.txt +0 -0
- {clarifai-11.1.5rc8.dist-info → clarifai-11.1.6rc1.dist-info}/top_level.txt +0 -0
clarifai/runners/__init__.py
CHANGED
@@ -1,9 +1,14 @@
+from .models.base_typed_model import AnyAnyModel, TextInputModel, VisualInputModel
 from .models.model_builder import ModelBuilder
-from .models.model_class import ModelClass
 from .models.model_runner import ModelRunner
+from .utils.data_handler import InputDataHandler, OutputDataHandler

 __all__ = [
     "ModelRunner",
     "ModelBuilder",
-    "
+    "InputDataHandler",
+    "OutputDataHandler",
+    "AnyAnyModel",
+    "TextInputModel",
+    "VisualInputModel",
 ]
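The typed model bases and data handlers are now re-exported at the package level, while the direct ModelClass import is dropped from this module. A minimal sketch of the new import surface, using only the names listed in __all__ above (the subclass is illustrative):

from clarifai.runners import InputDataHandler, OutputDataHandler, TextInputModel


# Model authors subclass one of the typed bases rather than importing
# ModelClass from clarifai.runners, which this version no longer re-exports.
class MyTextModel(TextInputModel):
  ...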
clarifai/runners/__pycache__/__init__.cpython-310.pyc
Binary file
clarifai/runners/dockerfile_template/Dockerfile.template
CHANGED
@@ -24,7 +24,7 @@ COPY --chown=nonroot:nonroot downloader/unused.yaml /home/nonroot/main/1/checkpo
 #####
 # Download checkpoints if config.yaml has checkpoints.when = "build"
 COPY --link=true config.yaml /home/nonroot/main/
-RUN ["python", "-m", "clarifai.cli", "model", "download-checkpoints", "
+RUN ["python", "-m", "clarifai.cli", "model", "download-checkpoints", "/home/nonroot/main", "--out_path", "/home/nonroot/main/1/checkpoints", "--stage", "build"]
 #####

 # Copy in the actual files like config.yaml, requirements.txt, and most importantly 1/model.py
@@ -44,9 +44,6 @@ ENV PYTHONPATH=${PYTHONPATH}:/home/nonroot/main \
     CLARIFAI_COMPUTE_CLUSTER_ID=${CLARIFAI_COMPUTE_CLUSTER_ID} \
     CLARIFAI_API_BASE=${CLARIFAI_API_BASE:-https://api.clarifai.com}

-# # Write out the model function signatures
-# RUN ["python", "-m", "clarifai.cli", "model", "signatures", "--model_path", "/home/nonroot/main", "--out_path", "/home/nonroot/main/signatures.yaml"]
-
 # Finally run the clarifai entrypoint to start the runner loop and local dev server.
 # Note(zeiler): we may want to make this a clarifai CLI call.
 ENTRYPOINT ["python", "-m", "clarifai.runners.server"]
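The build-time checkpoint download now passes the model path positionally and pins the stage to "build", and the commented-out signatures step is removed along with the signatures machinery elsewhere in this release. A minimal sketch of the equivalent call outside Docker, using only the arguments visible in the RUN line above (the local paths are illustrative):

import subprocess

# Mirrors the Dockerfile RUN line: the model path is positional, followed by
# --out_path for the checkpoint directory and --stage build.
subprocess.run(
    ["python", "-m", "clarifai.cli", "model", "download-checkpoints",
     "./my_model", "--out_path", "./my_model/1/checkpoints", "--stage", "build"],
    check=True)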
clarifai/runners/models/__pycache__/base_typed_model.cpython-310.pyc
Binary file
clarifai/runners/models/__pycache__/model_builder.cpython-310.pyc
Binary file
clarifai/runners/models/__pycache__/model_class.cpython-310.pyc
Binary file
clarifai/runners/models/__pycache__/model_runner.cpython-310.pyc
Binary file
clarifai/runners/models/base_typed_model.py
ADDED
@@ -0,0 +1,238 @@
+import itertools
+from typing import Any, Dict, Iterator, List, Tuple
+
+import numpy as np
+from clarifai_grpc.grpc.api import resources_pb2, service_pb2
+from clarifai_grpc.grpc.api.service_pb2 import PostModelOutputsRequest
+from google.protobuf import json_format
+
+from ..utils.data_handler import InputDataHandler, OutputDataHandler
+from .model_class import ModelClass
+
+
+class AnyAnyModel(ModelClass):
+
+  def load_model(self):
+    """
+    Load inference time artifacts that are called frequently .e.g. models, tokenizers, etc.
+    in this method so they are loaded only once for faster inference.
+    """
+    raise NotImplementedError
+
+  def parse_input_request(
+      self, input_request: service_pb2.PostModelOutputsRequest) -> Tuple[List[Dict], Dict]:
+    list_input_dict = [
+        InputDataHandler.from_proto(input).to_python() for input in input_request.inputs
+    ]
+    inference_params = json_format.MessageToDict(
+        input_request.model.model_version.output_info.params)
+
+    return list_input_dict, inference_params
+
+  def convert_output_to_proto(self, outputs: list):
+    assert (isinstance(outputs, Iterator) or isinstance(outputs, list) or
+            isinstance(outputs, tuple)), "outputs must be an Iterator"
+    output_protos = []
+    for output in outputs:
+      if isinstance(output, OutputDataHandler):
+        output = output.proto
+      elif isinstance(output, resources_pb2.Output):
+        pass
+      else:
+        raise NotImplementedError
+      output_protos.append(output)
+
+    return service_pb2.MultiOutputResponse(outputs=output_protos)
+
+  def predict_wrapper(
+      self, request: service_pb2.PostModelOutputsRequest) -> service_pb2.MultiOutputResponse:
+    list_dict_input, inference_params = self.parse_input_request(request)
+    outputs = self.predict(list_dict_input, inference_parameters=inference_params)
+    return self.convert_output_to_proto(outputs)
+
+  def generate_wrapper(
+      self, request: PostModelOutputsRequest) -> Iterator[service_pb2.MultiOutputResponse]:
+    list_dict_input, inference_params = self.parse_input_request(request)
+    outputs = self.generate(list_dict_input, inference_parameters=inference_params)
+    for output in outputs:
+      yield self.convert_output_to_proto(output)
+
+  def _preprocess_stream(
+      self, request: Iterator[PostModelOutputsRequest]) -> Iterator[Tuple[List[Dict], List[Dict]]]:
+    """Return generator of processed data (from proto to python) and inference parameters like predict and generate"""
+    for i, req in enumerate(request):
+      input_data, _ = self.parse_input_request(req)
+      yield input_data
+
+  def stream_wrapper(self, request: Iterator[PostModelOutputsRequest]
+                    ) -> Iterator[service_pb2.MultiOutputResponse]:
+    first_request = next(request)
+    _, inference_params = self.parse_input_request(first_request)
+    request_iterator = itertools.chain([first_request], request)
+    outputs = self.stream(self._preprocess_stream(request_iterator), inference_params)
+    for output in outputs:
+      yield self.convert_output_to_proto(output)
+
+  def predict(self, input_data: List[Dict],
+              inference_parameters: Dict[str, Any] = {}) -> List[OutputDataHandler]:
+    """
+    Prediction method.
+
+    Args:
+    -----
+    - input_data: is list of dict where key is input type name.
+      * image: np.ndarray
+      * text: str
+      * audio: bytes
+
+    - inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters.
+
+    Returns:
+    --------
+      List of OutputDataHandler
+    """
+    raise NotImplementedError
+
+  def generate(self, input_data: List[Dict],
+               inference_parameters: Dict[str, Any] = {}) -> Iterator[List[OutputDataHandler]]:
+    """
+    Generate method.
+
+    Args:
+    -----
+    - input_data: is list of dict where key is input type name.
+      * image: np.ndarray
+      * text: str
+      * audio: bytes
+
+    - inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters.
+
+    Yield:
+    --------
+      List of OutputDataHandler
+    """
+    raise NotImplementedError
+
+  def stream(self, inputs: Iterator[List[Dict[str, Any]]],
+             inference_params: Dict[str, Any]) -> Iterator[List[OutputDataHandler]]:
+    """
+    Stream method.
+
+    Args:
+    -----
+    input_request: is an Iterator of Tuple which
+    - First element (List[Dict[str, Union[np.ndarray, str, bytes]]]) is list of dict input data type which keys and values are:
+      * image: np.ndarray
+      * text: str
+      * audio: bytes
+
+    - Second element (Dict[str, Union[bool, str, float, int]]): is a dict of inference_parameters
+
+    Yield:
+    --------
+      List of OutputDataHandler
+    """
+    raise NotImplementedError
+
+
+class VisualInputModel(AnyAnyModel):
+
+  def parse_input_request(
+      self, input_request: service_pb2.PostModelOutputsRequest) -> Tuple[List[Dict], Dict]:
+    list_input_dict = [
+        InputDataHandler.from_proto(input).image(format="np") for input in input_request.inputs
+    ]
+    inference_params = json_format.MessageToDict(
+        input_request.model.model_version.output_info.params)
+
+    return list_input_dict, inference_params
+
+  def load_model(self):
+    """
+    Load inference time artifacts that are called frequently .e.g. models, tokenizers, etc.
+    in this method so they are loaded only once for faster inference.
+    """
+    raise NotImplementedError
+
+  def predict(self, input_data: List[np.ndarray],
+              inference_parameters: Dict[str, Any] = {}) -> List[OutputDataHandler]:
+    """
+    Prediction method.
+
+    Args:
+    -----
+    - input_data(List[np.ndarray]): is list of image as np.ndarray type
+    - inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters.
+
+    Returns:
+    --------
+      List of OutputDataHandler
+    """
+    raise NotImplementedError
+
+
+class TextInputModel(AnyAnyModel):
+
+  def load_model(self):
+    """
+    Load inference time artifacts that are called frequently .e.g. models, tokenizers, etc.
+    in this method so they are loaded only once for faster inference.
+    """
+    raise NotImplementedError
+
+  def parse_input_request(
+      self, input_request: service_pb2.PostModelOutputsRequest) -> Tuple[List[Dict], Dict]:
+    list_input_text = [InputDataHandler.from_proto(input).text for input in input_request.inputs]
+    inference_params = json_format.MessageToDict(
+        input_request.model.model_version.output_info.params)
+
+    return list_input_text, inference_params
+
+  def predict(self, input_data: List[str],
+              inference_parameters: Dict[str, Any] = {}) -> List[OutputDataHandler]:
+    """
+    Prediction method.
+
+    Args:
+    -----
+    - input_data(List[str]): is list of text as str type
+    - inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters.

+    Returns:
+    --------
+      List of OutputDataHandler
+    """
+    raise NotImplementedError
+
+  def generate(self, input_data: List[str],
+               inference_parameters: Dict[str, Any] = {}) -> Iterator[List[OutputDataHandler]]:
+    """
+    Prediction method.
+
+    Args:
+    -----
+    - input_data(List[str]): is list of text as str type
+    - inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters.
+
+    Yield:
+    --------
+      List of OutputDataHandler
+    """
+    raise NotImplementedError
+
+  def stream(self, inputs: Iterator[List[str]],
+             inference_params: Dict[str, Any]) -> Iterator[List[OutputDataHandler]]:
+    """
+    Stream method.
+
+    Args:
+    -----
+    input_request: is an Iterator of Tuple which
+    - First element (List[str]) is list of input text:
+    - Second element (Dict[str, Union[bool, str, float, int]]): is a dict of inference_parameters
+
+    Yield:
+    --------
+      List of OutputDataHandler
+    """
+    raise NotImplementedError
clarifai/runners/models/model_builder.py
CHANGED
@@ -14,14 +14,13 @@ from google.protobuf import json_format
 from rich import print
 from rich.markup import escape

-from clarifai.client
+from clarifai.client import BaseClient
 from clarifai.runners.models.model_class import ModelClass
 from clarifai.runners.utils.const import (
     AVAILABLE_PYTHON_IMAGES, AVAILABLE_TORCH_IMAGES, CONCEPTS_REQUIRED_MODEL_TYPE,
     DEFAULT_DOWNLOAD_CHECKPOINT_WHEN, DEFAULT_PYTHON_VERSION, DEFAULT_RUNTIME_DOWNLOAD_PATH,
     PYTHON_BASE_IMAGE, TORCH_BASE_IMAGE)
 from clarifai.runners.utils.loader import HuggingFaceLoader
-from clarifai.runners.utils.method_signatures import signatures_to_yaml
 from clarifai.urls.helper import ClarifaiUrlHelper
 from clarifai.utils.logging import logger
 from clarifai.versions import CLIENT_VERSION
@@ -70,18 +69,6 @@ class ModelBuilder:
     """
     Create an instance of the model class, as specified in the config file.
     """
-    model_class = self.load_model_class()
-
-    # initialize the model
-    model = model_class()
-    if load_model:
-      model.load_model()
-    return model
-
-  def load_model_class(self):
-    """
-    Import the model class from the model.py file.
-    """
     # look for default model.py file location
     for loc in ["model.py", "1/model.py"]:
       model_file = os.path.join(self.folder, loc)
@@ -120,7 +107,12 @@ class ModelBuilder:
           "Could not determine model class. There should be exactly one model inheriting from ModelClass defined in the model.py"
       )
     model_class = classes[0]
-
+
+    # initialize the model
+    model = model_class()
+    if load_model:
+      model.load_model()
+    return model

   def _validate_folder(self, folder):
     if folder == ".":
@@ -261,15 +253,6 @@ class ModelBuilder:
         total_size += member.size
     return total_size

-  def method_signatures_yaml(self):
-    """
-    Returns the method signatures for the model class in YAML format.
-    """
-    model_class = self.load_model_class()
-    method_info = model_class._get_method_info()
-    signatures = {name: m.signature for name, m in method_info.items()}
-    return signatures_to_yaml(signatures)
-
   @property
   def client(self):
     if self._client is None:
@@ -639,9 +622,9 @@ class ModelBuilder:
       checkpoint_size = self.DEFAULT_CHECKPOINT_SIZE
     self.storage_request_size += checkpoint_size

-    self.maybe_create_model()
+    resp = self.maybe_create_model()
     if not self.check_model_exists():
-      logger.error(f"Failed to create model: {self.model_proto.id}")
+      logger.error(f"Failed to create model: {self.model_proto.id}. Details: {resp}")
       sys.exit(1)

     for response in self.client.STUB.PostModelVersionsUpload(
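The net effect is that load_model_class() and method_signatures_yaml() are gone: create_model_instance() now locates model.py, picks the single ModelClass subclass, and instantiates it in one pass, and a failed model creation now logs the response from maybe_create_model(). A minimal sketch of the consolidated entry point, assuming ModelBuilder still takes the model folder in its constructor and that the load_model flag mirrors the `if load_model:` branch in the diff (the path is illustrative):

from clarifai.runners.models.model_builder import ModelBuilder

builder = ModelBuilder("./my_model")
# Finds model.py (or 1/model.py), instantiates the one ModelClass subclass,
# and calls load_model() on it when load_model is set.
model = builder.create_model_instance(load_model=True)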
clarifai/runners/models/model_class.py
CHANGED
@@ -1,269 +1,41 @@
-import inspect
-import itertools
-import logging
-import os
-import traceback
-from abc import ABC
-from typing import Any, Dict, Iterator, List
+from abc import ABC, abstractmethod
+from typing import Iterator

-from clarifai_grpc.grpc.api import resources_pb2, service_pb2
-from clarifai_grpc.grpc.api.status import status_code_pb2, status_pb2
-
-from clarifai.runners.utils import data_types
-from clarifai.runners.utils.method_signatures import (build_function_signature, deserialize,
-                                                      get_stream_from_signature, serialize,
-                                                      signatures_to_json)
-
-_METHOD_INFO_ATTR = '_cf_method_info'
-
-_RAISE_EXCEPTIONS = os.getenv("RAISE_EXCEPTIONS", "false").lower() == "true"
+from clarifai_grpc.grpc.api import service_pb2


 class ModelClass(ABC):
-  '''
-  Base class for model classes that can be run as a service.
-
-  Define predict, generate, or stream methods using the @ModelClass.method decorator.
-
-  Example:
-
-    from clarifai.runners.model_class import ModelClass, methods
-    from clarifai.runners.utils.data_types import Input, Stream
-
-    class MyModel(ModelClass):
-
-      @ModelClass.method
-      def predict(self, x: str, y: int) -> List[str]:
-        return [x] * y
-
-      @ModelClass.method
-      def generate(self, x: str, y: int) -> Stream[str]:
-        for i in range(y):
-          yield x + str(i)
-
-      @ModelClass.method
-      def stream(self, input_stream: Stream[Input(x=str, y=int)]) -> Stream[str]:
-        for item in input_stream:
-          yield item.x + ' ' + str(item.y)
-  '''
-
-  @staticmethod
-  def method(func):
-    setattr(func, _METHOD_INFO_ATTR, _MethodInfo(func))
-    return func
-
-  def load_model(self):
-    """Load the model."""
-
-  def _handle_get_signatures_request(self) -> service_pb2.MultiOutputResponse:
-    methods = self._get_method_info()
-    signatures = {method.name: method.signature for method in methods.values()}
-    resp = service_pb2.MultiOutputResponse(status=status_pb2.Status(code=status_code_pb2.SUCCESS))
-    output = resp.outputs.add()
-    output.status.code = status_code_pb2.SUCCESS
-    output.data.text.raw = signatures_to_json(signatures)
-    return resp
-
-  def _batch_predict(self, method, inputs: List[Dict[str, Any]]) -> List[Any]:
-    """Batch predict method for multiple inputs."""
-    outputs = []
-    for input in inputs:
-      output = method(**input)
-      outputs.append(output)
-    return outputs
-
-  def _batch_generate(self, method, inputs: List[Dict[str, Any]]) -> Iterator[List[Any]]:
-    """Batch generate method for multiple inputs."""
-    generators = [method(**input) for input in inputs]
-    for outputs in itertools.zip_longest(*generators):
-      yield outputs

   def predict_wrapper(
       self, request: service_pb2.PostModelOutputsRequest) -> service_pb2.MultiOutputResponse:
-    outputs = []
-    try:
-      # TODO add method name field to proto
-      method_name = 'predict'
-      if len(request.inputs) > 0 and '_method_name' in request.inputs[0].data.metadata:
-        method_name = request.inputs[0].data.metadata['_method_name']
-      if method_name == '_GET_SIGNATURES':  # special case to fetch signatures, TODO add endpoint for this
-        return self._handle_get_signatures_request()
-      if method_name not in self._get_method_info():
-        raise ValueError(f"Method {method_name} not found in model class")
-      method = getattr(self, method_name)
-      method_info = method._cf_method_info
-      signature = method_info.signature
-      python_param_types = method_info.python_param_types
-      inputs = self._convert_input_protos_to_python(request.inputs, signature.inputs,
-                                                    python_param_types)
-      if len(inputs) == 1:
-        inputs = inputs[0]
-        output = method(**inputs)
-        outputs.append(self._convert_output_to_proto(output, signature.outputs))
-      else:
-        outputs = self._batch_predict(method, inputs)
-        outputs = [self._convert_output_to_proto(output, signature.outputs) for output in outputs]
-
-      return service_pb2.MultiOutputResponse(
-          outputs=outputs, status=status_pb2.Status(code=status_code_pb2.SUCCESS))
-    except Exception as e:
-      if _RAISE_EXCEPTIONS:
-        raise
-      logging.exception("Error in predict")
-      return service_pb2.MultiOutputResponse(status=status_pb2.Status(
-          code=status_code_pb2.FAILURE,
-          details=str(e),
-          stack_trace=traceback.format_exc().split('\n')))
+    """This method is used for input/output proto data conversion"""
+    return self.predict(request)

   def generate_wrapper(self, request: service_pb2.PostModelOutputsRequest
                       ) -> Iterator[service_pb2.MultiOutputResponse]:
-    try:
-      method_name = 'generate'
-      if len(request.inputs) > 0 and '_method_name' in request.inputs[0].data.metadata:
-        method_name = request.inputs[0].data.metadata['_method_name']
-      method = getattr(self, method_name)
-      method_info = method._cf_method_info
-      signature = method_info.signature
-      python_param_types = method_info.python_param_types
-
-      inputs = self._convert_input_protos_to_python(request.inputs, signature.inputs,
-                                                    python_param_types)
-      if len(inputs) == 1:
-        inputs = inputs[0]
-        for output in method(**inputs):
-          resp = service_pb2.MultiOutputResponse()
-          self._convert_output_to_proto(output, signature.outputs, proto=resp.outputs.add())
-          resp.status.code = status_code_pb2.SUCCESS
-          yield resp
-      else:
-        for outputs in self._batch_generate(method, inputs):
-          resp = service_pb2.MultiOutputResponse()
-          for output in outputs:
-            self._convert_output_to_proto(output, signature.outputs, proto=resp.outputs.add())
-          resp.status.code = status_code_pb2.SUCCESS
-          yield resp
-    except Exception as e:
-      if _RAISE_EXCEPTIONS:
-        raise
-      logging.exception("Error in generate")
-      yield service_pb2.MultiOutputResponse(status=status_pb2.Status(
-          code=status_code_pb2.FAILURE,
-          details=str(e),
-          stack_trace=traceback.format_exc().split('\n')))
+    """This method is used for input/output proto data conversion and yield outcome"""
+    return self.generate(request)

-  def stream_wrapper(self, request_iterator: Iterator[service_pb2.PostModelOutputsRequest]
+  def stream_wrapper(self, request: service_pb2.PostModelOutputsRequest
                     ) -> Iterator[service_pb2.MultiOutputResponse]:
-    try:
-      request = next(request_iterator)
-      assert len(request.inputs) == 1, "Streaming requires exactly one input"
-
-      method_name = 'generate'
-      if len(request.inputs) > 0 and '_method_name' in request.inputs[0].data.metadata:
-        method_name = request.inputs[0].data.metadata['_method_name']
-      method = getattr(self, method_name)
-      method_info = method._cf_method_info
-      signature = method_info.signature
-      python_param_types = method_info.python_param_types
-
-      # find the streaming vars in the signature
-      stream_sig = get_stream_from_signature(signature.inputs)
-      if stream_sig is None:
-        raise ValueError("Streaming method must have a Stream input")
-      stream_argname = stream_sig.name
-
-      # convert all inputs for the first request, including the first stream value
-      inputs = self._convert_input_protos_to_python(request.inputs, signature.inputs,
-                                                    python_param_types)
-      kwargs = inputs[0]
-
-      # first streaming item
-      first_item = kwargs.pop(stream_argname)
-
-      # streaming generator
-      def InputStream():
-        yield first_item
-        # subsequent streaming items contain only the streaming input
-        for request in request_iterator:
-          item = self._convert_input_protos_to_python(request.inputs, stream_sig,
-                                                      python_param_types)
-          item = item[0][stream_argname]
-          yield item
-
-      # add stream generator back to the input kwargs
-      kwargs[stream_argname] = InputStream()
+    """This method is used for input/output proto data conversion and yield outcome"""
+    return self.stream(request)

-      for output in method(**kwargs):
-        resp = service_pb2.MultiOutputResponse()
-        self._convert_output_to_proto(output, signature.outputs, proto=resp.outputs.add())
-        resp.status.code = status_code_pb2.SUCCESS
-        yield resp
-    except Exception as e:
-      if _RAISE_EXCEPTIONS:
-        raise
-      logging.exception("Error in stream")
-      yield service_pb2.MultiOutputResponse(status=status_pb2.Status(
-          code=status_code_pb2.FAILURE,
-          details=str(e),
-          stack_trace=traceback.format_exc().split('\n')))
-
-  def _convert_input_protos_to_python(self, inputs: List[resources_pb2.Input], variables_signature,
-                                      python_param_types) -> List[Dict[str, Any]]:
-    result = []
-    for input in inputs:
-      kwargs = deserialize(input.data, variables_signature)
-      # dynamic cast to annotated types
-      for k, v in kwargs.items():
-        if k not in python_param_types:
-          continue
-        kwargs[k] = data_types.cast(v, python_param_types[k])
-      result.append(kwargs)
-    return result
-
-  def _convert_output_to_proto(self, output: Any, variables_signature,
-                               proto=None) -> resources_pb2.Output:
-    if proto is None:
-      proto = resources_pb2.Output()
-    serialize({'return': output}, [variables_signature], proto.data, is_output=True)
-    proto.status.code = status_code_pb2.SUCCESS
-    return proto
-
-  @classmethod
-  def _register_model_methods(cls):
-    # go up the class hierarchy to find all decorated methods, and add to registry of current class
-    methods = {}
-    for base in reversed(cls.__mro__):
-      for name, method in base.__dict__.items():
-        method_info = getattr(method, _METHOD_INFO_ATTR, None)
-        if not method_info:  # regular function, not a model method
-          continue
-        methods[name] = method_info
-    # check for generic predict(request) -> response, etc. methods
-    #for name in ('predict', 'generate', 'stream'):
-    #  if hasattr(cls, name):
-    #    method = getattr(cls, name)
-    #    if not hasattr(method, _METHOD_INFO_ATTR):  # not already put in registry
-    #      methods[name] = _MethodInfo(method)
-    # set method table for this class in the registry
-    return methods
-
-  @classmethod
-  def _get_method_info(cls, func_name=None):
-    if not hasattr(cls, _METHOD_INFO_ATTR):
-      setattr(cls, _METHOD_INFO_ATTR, cls._register_model_methods())
-    method_info = getattr(cls, _METHOD_INFO_ATTR)
-    if func_name:
-      return method_info[func_name]
-    return method_info
-
-
-class _MethodInfo:
-
-  def __init__(self, method):
-    self.name = method.__name__
-    self.signature = build_function_signature(method)
-    self.python_param_types = {
-        p.name: p.annotation
-        for p in inspect.signature(method).parameters.values()
-        if p.annotation != inspect.Parameter.empty
-    }
-    self.python_param_types.pop('self', None)
+
+  @abstractmethod
+  def load_model(self):
+    raise NotImplementedError("load_model() not implemented")
+
+  @abstractmethod
+  def predict(self,
+              request: service_pb2.PostModelOutputsRequest) -> service_pb2.MultiOutputResponse:
+    raise NotImplementedError("run_input() not implemented")
+
+  @abstractmethod
+  def generate(self, request: service_pb2.PostModelOutputsRequest
+              ) -> Iterator[service_pb2.MultiOutputResponse]:
+    raise NotImplementedError("generate() not implemented")
+
+  @abstractmethod
+  def stream(self, request_iterator: Iterator[service_pb2.PostModelOutputsRequest]
+            ) -> Iterator[service_pb2.MultiOutputResponse]:
+    raise NotImplementedError("stream() not implemented")
|