clarifai 10.8.4__py3-none-any.whl → 10.8.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clarifai/__init__.py +1 -1
- clarifai/client/dataset.py +9 -3
- clarifai/constants/dataset.py +1 -1
- clarifai/datasets/upload/base.py +6 -3
- clarifai/datasets/upload/features.py +10 -0
- clarifai/datasets/upload/image.py +22 -13
- clarifai/datasets/upload/multimodal.py +70 -0
- clarifai/datasets/upload/text.py +8 -5
- clarifai/runners/models/model_upload.py +67 -31
- clarifai/runners/utils/loader.py +0 -1
- clarifai/utils/misc.py +6 -0
- {clarifai-10.8.4.dist-info → clarifai-10.8.6.dist-info}/METADATA +2 -1
- {clarifai-10.8.4.dist-info → clarifai-10.8.6.dist-info}/RECORD +17 -60
- clarifai/models/model_serving/README.md +0 -158
- clarifai/models/model_serving/__init__.py +0 -14
- clarifai/models/model_serving/cli/__init__.py +0 -12
- clarifai/models/model_serving/cli/_utils.py +0 -53
- clarifai/models/model_serving/cli/base.py +0 -14
- clarifai/models/model_serving/cli/build.py +0 -79
- clarifai/models/model_serving/cli/clarifai_clis.py +0 -33
- clarifai/models/model_serving/cli/create.py +0 -171
- clarifai/models/model_serving/cli/example_cli.py +0 -34
- clarifai/models/model_serving/cli/login.py +0 -26
- clarifai/models/model_serving/cli/upload.py +0 -183
- clarifai/models/model_serving/constants.py +0 -21
- clarifai/models/model_serving/docs/cli.md +0 -161
- clarifai/models/model_serving/docs/concepts.md +0 -229
- clarifai/models/model_serving/docs/dependencies.md +0 -11
- clarifai/models/model_serving/docs/inference_parameters.md +0 -139
- clarifai/models/model_serving/docs/model_types.md +0 -19
- clarifai/models/model_serving/model_config/__init__.py +0 -16
- clarifai/models/model_serving/model_config/base.py +0 -369
- clarifai/models/model_serving/model_config/config.py +0 -312
- clarifai/models/model_serving/model_config/inference_parameter.py +0 -129
- clarifai/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +0 -25
- clarifai/models/model_serving/model_config/model_types_config/text-classifier.yaml +0 -19
- clarifai/models/model_serving/model_config/model_types_config/text-embedder.yaml +0 -20
- clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml +0 -19
- clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml +0 -19
- clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml +0 -22
- clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml +0 -32
- clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml +0 -19
- clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +0 -19
- clarifai/models/model_serving/model_config/output.py +0 -133
- clarifai/models/model_serving/model_config/triton/__init__.py +0 -14
- clarifai/models/model_serving/model_config/triton/serializer.py +0 -136
- clarifai/models/model_serving/model_config/triton/triton_config.py +0 -182
- clarifai/models/model_serving/model_config/triton/wrappers.py +0 -281
- clarifai/models/model_serving/repo_build/__init__.py +0 -14
- clarifai/models/model_serving/repo_build/build.py +0 -198
- clarifai/models/model_serving/repo_build/static_files/_requirements.txt +0 -2
- clarifai/models/model_serving/repo_build/static_files/base_test.py +0 -169
- clarifai/models/model_serving/repo_build/static_files/inference.py +0 -26
- clarifai/models/model_serving/repo_build/static_files/sample_clarifai_config.yaml +0 -25
- clarifai/models/model_serving/repo_build/static_files/test.py +0 -40
- clarifai/models/model_serving/repo_build/static_files/triton/model.py +0 -75
- clarifai/models/model_serving/utils.py +0 -31
- {clarifai-10.8.4.dist-info → clarifai-10.8.6.dist-info}/LICENSE +0 -0
- {clarifai-10.8.4.dist-info → clarifai-10.8.6.dist-info}/WHEEL +0 -0
- {clarifai-10.8.4.dist-info → clarifai-10.8.6.dist-info}/entry_points.txt +0 -0
- {clarifai-10.8.4.dist-info → clarifai-10.8.6.dist-info}/top_level.txt +0 -0

clarifai/models/model_serving/docs/inference_parameters.md
@@ -1,139 +0,0 @@
-## Inference parameters
-
-In order to send parameters to `inference_parameters` of `predict` in `inference.py`, you can define them here and they will be visible and adjustable in the Clarifai model view.
-
-This document helps you understand the concept of inference parameters and how to add them to `clarifai_config.yaml`.
-
-## Overview
-
-Each parameter has 4 fields:
-
-* `path` (str): name of your parameter; it must be a valid Python variable name.
-* `field_type` (int): the parameter data type, one of {1, 2, 21, 3}, meaning {boolean, string, encrypted_string, number} respectively. `number` means `int` or `float`. `encrypted_string` is a string that can be used to store secrets, such as an API key; the API will not return its value as plaintext.
-* `default_value`: a default value for the parameter.
-* `description` (str): a short sentence describing what the parameter does.
-
-An example of all four parameter types:
-
-```yaml
-- path: boolean_var
-  default_value: true
-  field_type: 1
-  description: a boolean variable
-- path: string_var
-  default_value: "a string"
-  field_type: 2
-  description: a string variable
-- path: number_var
-  default_value: 1
-  field_type: 3
-  description: a number variable
-- path: secret_string_var
-  default_value: "YOUR_SECRET"
-  field_type: 21
-  description: a string variable containing a secret such as an API key
-```
-
-## Add them to the config file
-
-For example, with the 4 sample parameters above:
-
-1. Manually:
-   Insert them into the `inference_parameters` field of the file, e.g.
-
-```yaml
-clarifai_model:
-  clarifai_model_id: ''
-  clarifai_user_app_id: ''
-  description: ''
-  inference_parameters:
-  - path: boolean_var
-    default_value: true
-    field_type: 1
-    description: a boolean variable
-  - path: string_var
-    default_value: "a string"
-    field_type: 2
-    description: a string variable
-  - path: number_var
-    default_value: 1
-    field_type: 3
-    description: a number variable
-  - path: secret_string_var
-    default_value: "YOUR_SECRET"
-    field_type: 21
-    description: a string variable containing a secret such as an API key
-  labels: []
-  type: text-to-image
-serving_backend:
-  triton:
-    ...
-```
-
-2. Semi-automatically: If you have a large number of fields, adding them one by one with specific field types can be tedious and error-prone.
-
-   To address this, you can define them as a dictionary, where the key is the path and the value is the default value, then inject them into `BaseTest()` in `test.py` within your model repository. For example, suppose your `test.py` looks like this:
-
-```python
-class CustomTest(unittest.TestCase):
-
-  def setUp(self) -> None:
-    your_infer_parameter = dict()
-    self.model = BaseTest(your_infer_parameter)
-
-  def test_default_cases(self):
-    self.model.test_with_default_inputs()
-
-```
-
-The `BaseTest` class takes inference parameters as a dict, validates their values, and finally saves them to the config file.
-With the current samples, the test file becomes:
-
-```python
-class CustomTest(unittest.TestCase):
-
-  def setUp(self) -> None:
-    your_infer_parameter = dict(boolean_var=True, string_var="a string", number_var=1, float_number_var=0.1, _secret_string_var="YOUR_SECRET")
-    self.model = BaseTest(your_infer_parameter)
-
-...
-```
-
-After running `test.py` with pytest, the config file looks like:
-
-```yaml
-clarifai_model:
-  clarifai_model_id: ''
-  clarifai_user_app_id: ''
-  description: ''
-  inference_parameters:
-  - path: boolean_var
-    default_value: true
-    field_type: 1
-    description: boolean_var
-  - path: string_var
-    default_value: "a string"
-    field_type: 2
-    description: string_var
-  - path: number_var
-    default_value: 1
-    field_type: 3
-    description: number_var
-  - path: float_number_var
-    default_value: 0.1
-    field_type: 3
-    description: float_number_var
-  - path: _secret_string_var
-    default_value: "YOUR_SECRET"
-    field_type: 21
-    description: _secret_string_var
-  labels: []
-  type: text-to-image
-serving_backend:
-  triton:
-    ...
-```
-
-> [!Note]
-> * The `description` field is set to the `path` value.
-> * An `ENCRYPTED_STRING` parameter must be defined with a `_` prefix.

clarifai/models/model_serving/docs/model_types.md
@@ -1,19 +0,0 @@
-Each model type requires different input and output types. The table below shows the supported model types and their corresponding input and output types.
-
-| Type                | Input       | Output               |
-|---------------------|-------------|----------------------|
-| multimodal-embedder | image, text | EmbeddingOutput      |
-| text-classifier     | text        | ClassifierOutput     |
-| text-embedder       | text        | EmbeddingOutput      |
-| text-to-image       | text        | ImageOutput          |
-| text-to-text        | text        | TextOutput           |
-| visual-classifier   | image       | ClassifierOutput     |
-| visual-detector     | image       | VisualDetectorOutput |
-| visual-embedder     | image       | EmbeddingOutput      |
-| visual-segmenter    | image       | MasksOutput          |
-
-Note:
-
-* `image`: a single image is an RGB np.ndarray with shape [W, H, 3]
-* `text`: a single text input is a Python string
-* `multimodal`: takes more than one input type
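
A minimal sketch of the single-input formats described in the note above, assuming nothing beyond numpy; the 224x224 size is arbitrary. The removed base classes (shown further below) operate on lists of these single inputs.

```python
import numpy as np

# One `image` input: an RGB np.ndarray of shape [W, H, 3].
image_input = np.zeros((224, 224, 3), dtype=np.uint8)

# One `text` input: a plain Python string.
text_input = "a short prompt or document"

# One `multimodal` input: a dict carrying more than one input type.
multimodal_input = {"image": image_input, "text": text_input}

# predict methods receive batches, i.e. lists of single inputs.
image_batch = [image_input]
text_batch = [text_input]
multimodal_batch = [multimodal_input]
```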

clarifai/models/model_serving/model_config/__init__.py
@@ -1,16 +0,0 @@
-# Copyright 2023 Clarifai, Inc.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from .base import *  # noqa
-from .config import *  # noqa
-from .inference_parameter import InferParam, InferParamManager  # noqa
-from .output import *  # noqa

clarifai/models/model_serving/model_config/base.py
@@ -1,369 +0,0 @@
-from typing import Dict, Iterable, List, TypedDict, Union
-
-import numpy as np
-
-from ..constants import IMAGE_TENSOR_NAME, TEXT_TENSOR_NAME
-from .config import ModelConfigClass, ModelTypes, get_model_config
-from .output import (ClassifierOutput, EmbeddingOutput, ImageOutput, MasksOutput, TextOutput,
-                     VisualDetectorOutput)
-from .triton import wrappers as triton_wrapper
-
-
-class _TypeCheckModelOutput(type):
-
-  def __new__(cls, name, bases, attrs):
-    """
-    Override child `predict` function with parent._output_type_check(child.predict).
-    Aim to check if child.predict returns valid output type
-    """
-
-    def wrap_function(fn_name, base, base_fn, other_fn):
-
-      def new_fn(_self,
-                 input_data,
-                 inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
-        # Run child class
-        out = other_fn(_self, input_data, inference_parameters=inference_parameters)
-        # Run type check
-        return base_fn(base, input_data, out)
-
-      new_fn.__name__ = "wrapped_%s" % fn_name
-      new_fn.__doc__ = other_fn.__doc__
-      return new_fn
-
-    if name != "_BaseClarifaiModel":
-      attrs["predict"] = wrap_function("predict", bases[0],
-                                       getattr(bases[0], "_output_type_check", lambda: None),
-                                       attrs.setdefault("predict", lambda: None))
-
-    return type.__new__(cls, name, bases, attrs)
-
-
-class _BaseClarifaiModel(metaclass=_TypeCheckModelOutput):
-  _config: ModelConfigClass = None
-
-  @property
-  def config(self):
-    return self._config
-
-  def _output_type_check(self, input, output):
-    output_type = self._config.clarifai_model.output_type
-    if isinstance(output, Iterable):
-      assert all(
-          each.__class__.__name__ == output_type for each in output
-      ), f"Expected output is iteration of `{output_type}` type, got iteration `{output}`"
-      assert len(output) == len(
-          input
-      ), f"Input length and output length must be equal, but got input length of {len(input)} and output length of {len(output)}"
-    else:
-      raise ValueError(f"Expected output is iteration of `{output_type}` type, got `{output}`")
-    return output
-
-  def predict(self,
-              input_data: Union[List[np.ndarray], Dict[str, List[np.ndarray]]],
-              inference_parameters: Dict[str, Union[bool, str, float, int]] = {}) -> Iterable:
-    """
-    Prediction method.
-
-    Args:
-    -----
-    - input_data: A list of input data item to predict on. The type depends on model input type:
-      * `image`: List[np.ndarray]
-      * `text`: List[str]
-      * `multimodal`:
-        input_data is list of dict where key is input type name e.i. `image`, `text` and value is list.
-        {"image": List[np.ndarray], "text": List[str]}
-
-    - inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters.
-
-    Returns:
-    --------
-      List of one of the `clarifai.models.model_serving.model_config.output` types. Refer to the README/docs
-    """
-    raise NotImplementedError
-
-  def _tritonserver_predict(self,
-                            input_data,
-                            inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
-    """ This method is invoked within tritonserver, specifically in the model.py of the Python backend. Attempting to execute it outside of the triton environment will result in failure."""
-    raise NotImplementedError
-
-
-_MultiModalInputTypeDict = TypedDict("_MultiModalInputTypeDict", {
-    IMAGE_TENSOR_NAME: np.ndarray,
-    TEXT_TENSOR_NAME: str
-})
-
-
-class MultiModalEmbedder(_BaseClarifaiModel):
-  _config: ModelConfigClass = get_model_config(ModelTypes.multimodal_embedder)
-
-  def predict(self,
-              input_data: List[_MultiModalInputTypeDict],
-              inference_parameters: Dict[str, Union[bool, str, float, int]] = {}
-             ) -> Iterable[EmbeddingOutput]:
-    """ Custom prediction function for `multimodal-embedder` model.
-
-    Args:
-      input_data (List[_MultiModalInputTypeDict]): List of dict of key-value: `image`(np.ndarray) and `text` (str)
-      inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters
-
-    Returns:
-      list of EmbeddingOutput
-    """
-    raise NotImplementedError
-
-  @triton_wrapper.multimodal_embedder
-  def _tritonserver_predict(self,
-                            input_data,
-                            inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
-    """ This method is invoked within tritonserver, specifically in the model.py of the Python backend. Attempting to execute it outside of the triton environment will result in failure."""
-    return self.predict(input_data, inference_parameters=inference_parameters)
-
-
-class TextClassifier(_BaseClarifaiModel):
-  _config: ModelConfigClass = get_model_config(ModelTypes.text_classifier)
-
-  def predict(self,
-              input_data: List[str],
-              inference_parameters: Dict[str, Union[bool, str, float, int]] = {}
-             ) -> Iterable[ClassifierOutput]:
-    """ Custom prediction function for `text-classifier` model.
-
-    Args:
-      input_data (List[str]): List of text
-      inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters
-
-    Returns:
-      list of ClassifierOutput
-    """
-    raise NotImplementedError
-
-  @triton_wrapper.text_classifier
-  def _tritonserver_predict(self,
-                            input_data,
-                            inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
-    return self.predict(input_data, inference_parameters=inference_parameters)
-
-
-class TextEmbedder(_BaseClarifaiModel):
-  _config: ModelConfigClass = get_model_config(ModelTypes.text_embedder)
-
-  def predict(self,
-              input_data: List[str],
-              inference_parameters: Dict[str, Union[bool, str, float, int]] = {}
-             ) -> Iterable[EmbeddingOutput]:
-    """ Custom prediction function for `text-embedder` model.
-
-    Args:
-      input_data (List[str]): List of text
-      inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters
-
-    Returns:
-      list of EmbeddingOutput
-    """
-    raise NotImplementedError
-
-  @triton_wrapper.text_embedder
-  def _tritonserver_predict(self,
-                            input_data,
-                            inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
-    return self.predict(input_data, inference_parameters=inference_parameters)
-
-
-class TextToImage(_BaseClarifaiModel):
-  _config: ModelConfigClass = get_model_config(ModelTypes.text_to_image)
-
-  def predict(
-      self,
-      input_data: List[str],
-      inference_parameters: Dict[str, Union[bool, str, float, int]] = {}) -> Iterable[ImageOutput]:
-    """ Custom prediction function for `text-to-image` model.
-
-    Args:
-      input_data (List[str]): List of text
-      inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters
-
-    Returns:
-      list of ImageOutput
-    """
-    raise NotImplementedError
-
-  @triton_wrapper.text_to_image
-  def _tritonserver_predict(self,
-                            input_data,
-                            inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
-    return self.predict(input_data, inference_parameters=inference_parameters)
-
-
-class TextToText(_BaseClarifaiModel):
-  _config: ModelConfigClass = get_model_config(ModelTypes.text_to_text)
-
-  def predict(
-      self,
-      input_data: List[str],
-      inference_parameters: Dict[str, Union[bool, str, float, int]] = {}) -> Iterable[TextOutput]:
-    """ Custom prediction function for `text-to-text` (also called as `text generation`) model.
-
-    Args:
-      input_data (List[str]): List of text
-      inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters
-
-    Returns:
-      list of TextOutput
-    """
-    raise NotImplementedError
-
-  @triton_wrapper.text_to_text
-  def _tritonserver_predict(self,
-                            input_data,
-                            inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
-    """ This method is invoked within tritonserver, specifically in the model.py of the Python backend. Attempting to execute it outside of the triton environment will result in failure."""
-    return self.predict(input_data, inference_parameters=inference_parameters)
-
-
-class VisualClassifier(_BaseClarifaiModel):
-  _config: ModelConfigClass = get_model_config(ModelTypes.visual_classifier)
-
-  def predict(self,
-              input_data: List[np.ndarray],
-              inference_parameters: Dict[str, Union[bool, str, float, int]] = {}
-             ) -> Iterable[ClassifierOutput]:
-    """ Custom prediction function for `visual-classifier` model.
-
-    Args:
-      input_data (List[np.ndarray]): List of image
-      inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters
-
-    Returns:
-      list of ClassifierOutput
-    """
-    raise NotImplementedError
-
-  @triton_wrapper.visual_classifier
-  def _tritonserver_predict(self,
-                            input_data,
-                            inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
-    """ This method is invoked within tritonserver, specifically in the model.py of the Python backend. Attempting to execute it outside of the triton environment will result in failure."""
-    return self.predict(input_data, inference_parameters=inference_parameters)
-
-
-class VisualDetector(_BaseClarifaiModel):
-  _config: ModelConfigClass = get_model_config(ModelTypes.visual_detector)
-
-  def predict(self,
-              input_data: List[np.ndarray],
-              inference_parameters: Dict[str, Union[bool, str, float, int]] = {}
-             ) -> Iterable[VisualDetectorOutput]:
-    """ Custom prediction function for `visual-detector` model.
-
-    Args:
-      input_data (List[np.ndarray]): List of image
-      inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters
-
-    Returns:
-      list of VisualDetectorOutput
-    """
-    raise NotImplementedError
-
-  @triton_wrapper.visual_detector
-  def _tritonserver_predict(self,
-                            input_data,
-                            inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
-    """ This method is invoked within tritonserver, specifically in the model.py of the Python backend. Attempting to execute it outside of the triton environment will result in failure."""
-    return self.predict(input_data, inference_parameters=inference_parameters)
-
-  @staticmethod
-  def postprocess(width: int,
-                  height: int,
-                  labels: list,
-                  scores: list,
-                  xyxy_boxes: list,
-                  max_bbox_count: int = 500) -> VisualDetectorOutput:
-    """Convert detection output to Clarifai detector output format
-
-    Args:
-        width (int): image width
-        height (int): image height
-        labels (list): list of labels
-        scores (list): list of scores
-        xyxy_boxes (list): list of bounding boxes in x_min, y_min, x_max, y_max format
-        max_bbox_count (int, optional): Maximum detection result. Defaults to 500.
-
-    Returns:
-        VisualDetectorOutput
-    """
-    assert len(labels) == len(scores) == len(
-        xyxy_boxes
-    ), f"Length of `labels`, `scores` and `bboxes` must be equal, got {len(labels)}, {len(scores)} and {len(xyxy_boxes)} "
-    labels = [[each] for each in labels]
-    scores = [[each] for each in scores]
-    bboxes = [[x[1] / height, x[0] / width, x[3] / height, x[2] / width]
-              for x in xyxy_boxes]  # normalize the bboxes to [0,1] and [y1 x1 y2 x2]
-    bboxes = np.clip(bboxes, 0, 1.)
-    if len(bboxes) != 0:
-      bboxes = np.concatenate((bboxes, np.zeros((max_bbox_count - len(bboxes), 4))))
-      scores = np.concatenate((scores, np.zeros((max_bbox_count - len(scores), 1))))
-      labels = np.concatenate((labels, np.zeros((max_bbox_count - len(labels), 1),
-                                                dtype=np.int32)))
-    else:
-      bboxes = np.zeros((max_bbox_count, 4), dtype=np.float32)
-      scores = np.zeros((max_bbox_count, 1), dtype=np.float32)
-      labels = np.zeros((max_bbox_count, 1), dtype=np.int32)
-
-    output = VisualDetectorOutput(
-        predicted_bboxes=bboxes, predicted_labels=labels, predicted_scores=scores)
-
-    return output
-
-
-class VisualEmbedder(_BaseClarifaiModel):
-  _config: ModelConfigClass = get_model_config(ModelTypes.visual_embedder)
-
-  def predict(self,
-              input_data: List[np.ndarray],
-              inference_parameters: Dict[str, Union[bool, str, float, int]] = {}
-             ) -> Iterable[EmbeddingOutput]:
-    """ Custom prediction function for `visual-embedder` model.
-
-    Args:
-      input_data (List[np.ndarray]): List of image
-      inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters
-
-    Returns:
-      list of EmbeddingOutput
-    """
-    raise NotImplementedError
-
-  @triton_wrapper.visual_embedder
-  def _tritonserver_predict(self,
-                            input_data,
-                            inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
-    """ This method is invoked within tritonserver, specifically in the model.py of the Python backend. Attempting to execute it outside of the triton environment will result in failure."""
-    return self.predict(input_data, inference_parameters=inference_parameters)
-
-
-class VisualSegmenter(_BaseClarifaiModel):
-  _config: ModelConfigClass = get_model_config(ModelTypes.visual_segmenter)
-
-  def predict(
-      self,
-      input_data: List[np.ndarray],
-      inference_parameters: Dict[str, Union[bool, str, float, int]] = {}) -> Iterable[MasksOutput]:
-    """ Custom prediction function for `visual-segmenter` model.
-
-    Args:
-      input_data (List[np.ndarray]): List of image
-      inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters
-
-    Returns:
-      list of MasksOutput
-    """
-    raise NotImplementedError
-
-  @triton_wrapper.visual_segmenter
-  def _tritonserver_predict(self,
-                            input_data,
-                            inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
-    """ This method is invoked within tritonserver, specifically in the model.py of the Python backend. Attempting to execute it outside of the triton environment will result in failure."""
-    return self.predict(input_data, inference_parameters=inference_parameters)
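
The deleted `base.py` above relies on the `_TypeCheckModelOutput` metaclass to wrap every subclass's `predict` with an output check. Below is a standalone sketch of that wrapping technique only; the names `_CheckOutput`, `BaseModel`, and `EchoModel` are illustrative and not part of the clarifai package, so the snippet runs without the removed modules.

```python
from typing import Iterable, List


class _CheckOutput(type):
  """Metaclass that wraps a subclass's `predict` with simple output validation."""

  def __new__(mcls, name, bases, attrs):
    if bases and "predict" in attrs:  # leave the base class itself untouched
      child_predict = attrs["predict"]

      def wrapped_predict(self, input_data, **kwargs):
        out = list(child_predict(self, input_data, **kwargs))
        # Mirrors the length check in `_output_type_check` above:
        # one output item per input item.
        assert len(out) == len(input_data), (
            f"expected {len(input_data)} outputs, got {len(out)}")
        return out

      attrs["predict"] = wrapped_predict
    return super().__new__(mcls, name, bases, attrs)


class BaseModel(metaclass=_CheckOutput):

  def predict(self, input_data: List[str]) -> Iterable[str]:
    raise NotImplementedError


class EchoModel(BaseModel):

  def predict(self, input_data: List[str]) -> Iterable[str]:
    return [text.upper() for text in input_data]


if __name__ == "__main__":
  print(EchoModel().predict(["hello", "world"]))  # ['HELLO', 'WORLD']
```

This mirrors how the removed code lets each `_BaseClarifaiModel` subclass keep a plain `predict` signature while still having its outputs validated before they reach the Triton wrappers.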