clarifai 10.0.0__py3-none-any.whl → 10.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. clarifai/client/base.py +8 -1
  2. clarifai/client/dataset.py +77 -21
  3. clarifai/client/input.py +6 -6
  4. clarifai/client/model.py +1 -1
  5. clarifai/client/module.py +1 -1
  6. clarifai/client/workflow.py +1 -1
  7. clarifai/datasets/upload/features.py +3 -0
  8. clarifai/datasets/upload/image.py +57 -26
  9. clarifai/datasets/upload/loaders/xview_detection.py +4 -0
  10. clarifai/datasets/upload/utils.py +23 -7
  11. clarifai/models/model_serving/README.md +113 -121
  12. clarifai/models/model_serving/__init__.py +2 -0
  13. clarifai/models/model_serving/cli/_utils.py +53 -0
  14. clarifai/models/model_serving/cli/base.py +14 -0
  15. clarifai/models/model_serving/cli/build.py +79 -0
  16. clarifai/models/model_serving/cli/clarifai_clis.py +33 -0
  17. clarifai/models/model_serving/cli/create.py +171 -0
  18. clarifai/models/model_serving/cli/example_cli.py +34 -0
  19. clarifai/models/model_serving/cli/login.py +26 -0
  20. clarifai/models/model_serving/cli/upload.py +182 -0
  21. clarifai/models/model_serving/constants.py +20 -0
  22. clarifai/models/model_serving/docs/cli.md +150 -0
  23. clarifai/models/model_serving/docs/concepts.md +229 -0
  24. clarifai/models/model_serving/docs/dependencies.md +1 -1
  25. clarifai/models/model_serving/docs/inference_parameters.md +112 -107
  26. clarifai/models/model_serving/docs/model_types.md +16 -17
  27. clarifai/models/model_serving/model_config/__init__.py +4 -2
  28. clarifai/models/model_serving/model_config/base.py +369 -0
  29. clarifai/models/model_serving/model_config/config.py +219 -224
  30. clarifai/models/model_serving/model_config/inference_parameter.py +5 -0
  31. clarifai/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +25 -24
  32. clarifai/models/model_serving/model_config/model_types_config/text-classifier.yaml +19 -18
  33. clarifai/models/model_serving/model_config/model_types_config/text-embedder.yaml +20 -18
  34. clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml +19 -18
  35. clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml +19 -18
  36. clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml +22 -18
  37. clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml +32 -28
  38. clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml +19 -18
  39. clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +19 -18
  40. clarifai/models/model_serving/{models → model_config}/output.py +8 -0
  41. clarifai/models/model_serving/model_config/triton/__init__.py +14 -0
  42. clarifai/models/model_serving/model_config/{serializer.py → triton/serializer.py} +3 -1
  43. clarifai/models/model_serving/model_config/triton/triton_config.py +182 -0
  44. clarifai/models/model_serving/{models/model_types.py → model_config/triton/wrappers.py} +4 -4
  45. clarifai/models/model_serving/{models → repo_build}/__init__.py +2 -0
  46. clarifai/models/model_serving/repo_build/build.py +198 -0
  47. clarifai/models/model_serving/repo_build/static_files/_requirements.txt +2 -0
  48. clarifai/models/model_serving/repo_build/static_files/base_test.py +169 -0
  49. clarifai/models/model_serving/repo_build/static_files/inference.py +26 -0
  50. clarifai/models/model_serving/repo_build/static_files/sample_clarifai_config.yaml +25 -0
  51. clarifai/models/model_serving/repo_build/static_files/test.py +40 -0
  52. clarifai/models/model_serving/{models/pb_model.py → repo_build/static_files/triton/model.py} +15 -14
  53. clarifai/models/model_serving/utils.py +21 -0
  54. clarifai/rag/rag.py +45 -12
  55. clarifai/rag/utils.py +3 -2
  56. clarifai/utils/logging.py +7 -0
  57. clarifai/versions.py +1 -1
  58. {clarifai-10.0.0.dist-info → clarifai-10.1.0.dist-info}/METADATA +28 -5
  59. clarifai-10.1.0.dist-info/RECORD +114 -0
  60. clarifai-10.1.0.dist-info/entry_points.txt +2 -0
  61. clarifai/models/model_serving/cli/deploy_cli.py +0 -123
  62. clarifai/models/model_serving/cli/model_zip.py +0 -61
  63. clarifai/models/model_serving/cli/repository.py +0 -89
  64. clarifai/models/model_serving/docs/custom_config.md +0 -33
  65. clarifai/models/model_serving/docs/output.md +0 -28
  66. clarifai/models/model_serving/models/default_test.py +0 -281
  67. clarifai/models/model_serving/models/inference.py +0 -50
  68. clarifai/models/model_serving/models/test.py +0 -64
  69. clarifai/models/model_serving/pb_model_repository.py +0 -108
  70. clarifai-10.0.0.dist-info/RECORD +0 -103
  71. clarifai-10.0.0.dist-info/entry_points.txt +0 -4
  72. {clarifai-10.0.0.dist-info → clarifai-10.1.0.dist-info}/LICENSE +0 -0
  73. {clarifai-10.0.0.dist-info → clarifai-10.1.0.dist-info}/WHEEL +0 -0
  74. {clarifai-10.0.0.dist-info → clarifai-10.1.0.dist-info}/top_level.txt +0 -0
--- a/clarifai/models/model_serving/docs/inference_parameters.md
+++ b/clarifai/models/model_serving/docs/inference_parameters.md
@@ -1,134 +1,139 @@
  ## Inference parameters

- When making prediction, you may need to change some paramaters to adjust the result. Those paramaters will be passed through `paramaters()` of a request in triton python model.
+ You can define inference parameters that will be visible and adjustable in the Clarifai model view; their values are passed to the `inference_parameters` argument of `predict` in `inference.py`.

- In order to send it to `**kwargs` of `get_predictions` in `inference.py`, you can define some parameters and they will be visible and adjustable on Clarifai model view.
+ This document explains the concept of inference parameters and how to add them to `clarifai_config.yaml`.

- This document helps you to create your inference parameters that can be visibale and adjustable easily on Clarifai platform. The defined parameters will be sent as `json` file when you use `clarifai-upload-model` cli.
+ ## Overview
+
+ Each parameter has 4 fields:

- ### JSON file structure:
- The file contains a list of object has 4 fields:
  * `path` (str): the name of your parameter; it must be a valid Python variable name
  * `field_type` (int): the parameter data type, one of {1, 2, 21, 3}, meaning {boolean, string, encrypted_string, number} respectively. `number` means `int` or `float`. `encrypted_string` is a string that can be used to store secrets, such as an API key; the API will not return its value as plaintext.
  * `default_value`: a default value for the parameter
  * `description` (str): a short sentence describing what the parameter does

- An example of 4 parameters:
- ```json
- [
-   {
-     "path": "boolean_var",
-     "field_type": 1,
-     "default_value": true,
-     "description": "a boolean variable"
-   },
-   {
-     "path": "string_var",
-     "field_type": 2,
-     "default_value": "string_1",
-     "description": "a string variable"
-   },
-   {
-     "path": "number_var",
-     "field_type": 3,
-     "default_value": 9.9,
-     "description": "a float number variable"
-   },
-   {
-     "path": "secret_string_var",
-     "field_type": 21,
-     "default_value": "API_KEY",
-     "description": "a string variable contains secret like API key"
-   },
- ]
+ An example of the 4 parameter types:
+
+ ```yaml
+ - path: boolean_var
+   default_value: true
+   field_type: 1
+   description: a boolean variable
+ - path: string_var
+   default_value: "a string"
+   field_type: 2
+   description: a string variable
+ - path: number_var
+   default_value: 1
+   field_type: 3
+   description: a number variable
+ - path: secret_string_var
+   default_value: "YOUR_SECRET"
+   field_type: 21
+   description: a string variable containing a secret, such as an API key
  ```

- ### Generate JSON file
- 1. Manually create the file based on above structure
- 2. By code:
-
- #### 2.1. Fully setup
- ```python
- from clarifai.models.model_serving.model_config.inference_parameter import InferParamManager, InferParam, InferParamType
-
- params = [
-   InferParam(
-     path="boolean_var",
-     field_type=InferParamType.BOOL,
-     default_value=True,
-     description="a boolean varaiabe"
-   ),
-   InferParam(
-     path="string_var",
-     field_type=InferParamType.STRING,
-     default_value="string_1",
-     description="a string varaiabe"
-   ),
-   InferParam(
-     path="number_var",
-     field_type=InferParamType.NUMBER,
-     default_value=9.9,
-     description="a float number varaiabe"
-   ),
-   InferParam(
-     path=secret_string_var",
-     field_type=InferParamType.ENCRYPTED_STRING,
-     default_value="API_KEY",
-     description="a string variable contains secret like API key"
-   ),
- ]
-
- ipm = InferParamManager(params=params)
- ipm.export("your_file.json")
+ ## Add them to the config file
+
+ For example, with the 4 sample parameters above:
+
+ 1. Manually:
+ Insert them into the `inference_parameters` field of the file, e.g.
+
+ ```yaml
+ clarifai_model:
+   clarifai_model_id: ''
+   clarifai_user_app_id: ''
+   description: ''
+   inference_parameters:
+   - path: boolean_var
+     default_value: true
+     field_type: 1
+     description: a boolean variable
+   - path: string_var
+     default_value: "a string"
+     field_type: 2
+     description: a string variable
+   - path: number_var
+     default_value: 1
+     field_type: 3
+     description: a number variable
+   - path: secret_string_var
+     default_value: "YOUR_SECRET"
+     field_type: 21
+     description: a string variable containing a secret, such as an API key
+   labels: []
+   type: text-to-image
+ serving_backend:
+   triton:
+     ...
  ```

- ##### 2.2. Shorten
- `NOTE`: in this way `description` field will be set as empty aka "".
- *You need to modify* `description` in order to be able to upload the settings to Clarifai.
-
- `NOTE`: in this way `ENCRYPTED_STRING` type must be defined with "_" prefix
-
- ```python
- params = dict(boolean_var=True, string_var="string_1", number_var=9.9, _secret_string_var="YOUR_KEY")
- ipm = InferParamManager.from_kwargs(**params)
- ipm.export("your_file.json")
-
- ```
+ 2. Semi-automatically: if you have a large number of parameters, adding them one by one with specific field types can be exhausting and error-prone.

- 3. In `test.py`. You can define your paramaters like `2.2. Shorten` in `inference_parameters` attribute of `CustomTestInferenceModel`, the file will be generated when you run the test. Keep in mind to change `description`
+ To address this, you can define them as a dictionary, where each key is the path and each value is the default value, then inject them into `BaseTest()` in `test.py` within your model repository. For example, suppose your `test.py` looks like this:

- ### Usage
- Your defined parameters will be passed through `kwargs` of `InferenceModel.get_predictions` method
- in `inference.py`
  ```python
- class InferenceModel:
-   def __init__():
-     # initialization
-     self.model = YourModel()
-
-   @some_wrapper_function
-   def get_predictions(self, input_data, **kwargs):
-     # `kwargs` contains your inference parameters
+ class CustomTest(unittest.TestCase):

-     # get a value from kwargs
-     number_var = kwargs.get("number_var", 9.9)
+   def setUp(self) -> None:
+     your_infer_parameter = dict()
+     self.model = BaseTest(your_infer_parameter)

-     # pass everything to a function
-     output = self.model.predict(input_data, **kwargs)
-
-     return SomeOutputType(output)
+   def test_default_cases(self):
+     self.model.test_with_default_inputs()

  ```

- in `test.py`
+ The `BaseTest` class takes the inference parameters as a dict, validates their values, and saves them to the config file.
+ With the current samples, `test.py` becomes:
+
  ```python
- class CustomTestInferenceModel:
-   inference_parameters = ""  # input a path of json file from `2.1` or a dict from `2.2`
+ class CustomTest(unittest.TestCase):
+
+   def setUp(self) -> None:
+     your_infer_parameter = dict(boolean_var=True, string_var="a string", number_var=1, float_number_var=0.1, _secret_string_var="YOUR_SECRET")
+     self.model = BaseTest(your_infer_parameter)

  ...
+ ```

-   def test_something(self):
-     input = ...
-     output = self.triton_get_predictions(input, number_var=1, string_var="test", _secret="KEY")
-     self.assert(...)
+ After running `test.py` with pytest, the config file looks like:
+
+ ```yaml
+ clarifai_model:
+   clarifai_model_id: ''
+   clarifai_user_app_id: ''
+   description: ''
+   inference_parameters:
+   - path: boolean_var
+     default_value: true
+     field_type: 1
+     description: boolean_var
+   - path: string_var
+     default_value: "a string"
+     field_type: 2
+     description: string_var
+   - path: number_var
+     default_value: 1
+     field_type: 3
+     description: number_var
+   - path: float_number_var
+     default_value: 0.1
+     field_type: 3
+     description: float_number_var
+   - path: _secret_string_var
+     default_value: "YOUR_SECRET"
+     field_type: 21
+     description: _secret_string_var
+   labels: []
+   type: text-to-image
+ serving_backend:
+   triton:
+     ...
  ```
+
+ > [!Note]
+ > * The `description` field is set to the value of `path`.
+ > * An `ENCRYPTED_STRING` parameter must be defined with a `_` prefix.
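
For orientation before the next file: a minimal sketch of how the parameters declared above surface at inference time, based on the `predict(input_data, inference_parameters={})` signature that `model_config/base.py` introduces later in this diff. The subclass name and the `image` field of `ImageOutput` are illustrative assumptions, not part of the release.

```python
# Hypothetical inference.py for the `text-to-image` sample config above.
import numpy as np
from clarifai.models.model_serving.model_config import ImageOutput, TextToImage

class InferenceModel(TextToImage):

  def predict(self, input_data, inference_parameters={}):
    # Keys match the `path` values declared in clarifai_config.yaml;
    # ENCRYPTED_STRING parameters keep their leading underscore.
    number_var = inference_parameters.get("number_var", 1)
    secret = inference_parameters.get("_secret_string_var", "YOUR_SECRET")
    # One output per input, as the base class's length check requires
    # (the `image` field name is an assumption).
    return [ImageOutput(image=np.zeros((64, 64, 3), dtype=np.uint8)) for _ in input_data]
```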
--- a/clarifai/models/model_serving/docs/model_types.md
+++ b/clarifai/models/model_serving/docs/model_types.md
@@ -1,20 +1,19 @@
- ## Clarifai Model Types
+ Each model type requires different input and output types. The table below illustrates the relationship between the supported model types and their corresponding input and output types.

- Models on the clarifai platform are deployed using the [Triton Inference Server Python Backend](https://github.com/triton-inference-server/python_backend) to allow for pre and post processing of data to and from the model.
+ | Type                | Input       | Output               |
+ |---------------------|-------------|----------------------|
+ | multimodal-embedder | image, text | EmbeddingOutput      |
+ | text-classifier     | text        | ClassifierOutput     |
+ | text-embedder       | text        | EmbeddingOutput      |
+ | text-to-image       | text        | ImageOutput          |
+ | text-to-text        | text        | TextOutput           |
+ | visual-classifier   | image       | ClassifierOutput     |
+ | visual-detector     | image       | VisualDetectorOutput |
+ | visual-embedder     | image       | EmbeddingOutput      |
+ | visual-segmenter    | image       | MasksOutput          |

- Inputs into the models are passed as numpy arrays and the predictions are similarly returned as numpy arrays.
- The predictions from user defined models in the [inference script](../README.md#the-inference-script) file have to match certain formats and shapes for the models to be upload compatible.
+ Note:

- Clarifai [model types](../models/model_types.py) are decorator functions that are responsible for passing input batch requests to user defined inference models to get predictions and format the resultant predictions into Triton Inference responses that are sent by the server for each client inference request.
-
- ## Supported Model Types Wrapper Functions:
-
- - visual_detector
- - visual_classifier
- - text_classifier
- - text_to_text
- - text_embedder
- - text_to_image
- - visual_embedder
- - visual_segmenter
- - multimodal_embedder
+ * `image`: a single image is an RGB `np.ndarray` with shape [W, H, 3]
+ * `text`: a single text input is a Python string
+ * `multimodal`: takes more than one input type
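
To make the table concrete, here is a sketch of a `visual-detector` implementation that returns `VisualDetectorOutput` via the `VisualDetector.postprocess` helper defined in `model_config/base.py` below; the single full-image detection is a placeholder, and the import path assumes the star exports added in `model_config/__init__.py`.

```python
import numpy as np
from clarifai.models.model_serving.model_config import VisualDetector

class InferenceModel(VisualDetector):

  def predict(self, input_data, inference_parameters={}):
    outputs = []
    for image in input_data:  # RGB np.ndarray with shape [W, H, 3]
      width, height = image.shape[0], image.shape[1]
      # Placeholder result: one box covering the whole image, label id 0.
      labels, scores = [0], [0.9]
      xyxy_boxes = [[0, 0, width, height]]
      outputs.append(self.postprocess(width, height, labels, scores, xyxy_boxes))
    return outputs
```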
--- a/clarifai/models/model_serving/model_config/__init__.py
+++ b/clarifai/models/model_serving/model_config/__init__.py
@@ -10,5 +10,7 @@
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  # See the License for the specific language governing permissions and
  # limitations under the License.
- from .config import * # noqa # pylint: disable=unused-import
- from .serializer import Serializer # noqa # pylint: disable=unused-import
+ from .base import * # noqa
+ from .config import * # noqa
+ from .inference_parameter import InferParam, InferParamManager # noqa
+ from .output import * # noqa
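
In practice this means the parameter helpers and, via the star imports, the model base classes and output types resolve directly from `model_config`. A sketch, assuming `base.py` and `output.py` export these names:

```python
from clarifai.models.model_serving.model_config import (
    InferParam,         # explicit export above
    InferParamManager,  # explicit export above
    TextToText,         # assumed re-exported by `from .base import *`
    TextOutput,         # assumed re-exported by `from .output import *`
)
```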
--- /dev/null
+++ b/clarifai/models/model_serving/model_config/base.py
@@ -0,0 +1,369 @@
+ from typing import Dict, Iterable, List, TypedDict, Union
+
+ import numpy as np
+
+ from ..constants import IMAGE_TENSOR_NAME, TEXT_TENSOR_NAME
+ from .config import ModelConfigClass, ModelTypes, get_model_config
+ from .output import (ClassifierOutput, EmbeddingOutput, ImageOutput, MasksOutput, TextOutput,
+                      VisualDetectorOutput)
+ from .triton import wrappers as triton_wrapper
+
+
+ class _TypeCheckModelOutput(type):
+
+   def __new__(cls, name, bases, attrs):
+     """
+     Override the child `predict` function with parent._output_type_check(child.predict).
+     Aims to check if child.predict returns a valid output type.
+     """
+
+     def wrap_function(fn_name, base, base_fn, other_fn):
+
+       def new_fn(_self,
+                  input_data,
+                  inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
+         # Run child class
+         out = other_fn(_self, input_data, inference_parameters=inference_parameters)
+         # Run type check
+         return base_fn(base, input_data, out)
+
+       new_fn.__name__ = "wrapped_%s" % fn_name
+       new_fn.__doc__ = other_fn.__doc__
+       return new_fn
+
+     if name != "_BaseClarifaiModel":
+       attrs["predict"] = wrap_function("predict", bases[0],
+                                        getattr(bases[0], "_output_type_check", lambda: None),
+                                        attrs.setdefault("predict", lambda: None))
+
+     return type.__new__(cls, name, bases, attrs)
+
+
+ class _BaseClarifaiModel(metaclass=_TypeCheckModelOutput):
+   _config: ModelConfigClass = None
+
+   @property
+   def config(self):
+     return self._config
+
+   def _output_type_check(self, input, output):
+     output_type = self._config.clarifai_model.output_type
+     if isinstance(output, Iterable):
+       assert all(
+           each.__class__.__name__ == output_type for each in output
+       ), f"Expected output is iteration of `{output_type}` type, got iteration `{output}`"
+       assert len(output) == len(
+           input
+       ), f"Input length and output length must be equal, but got input length of {len(input)} and output length of {len(output)}"
+     else:
+       raise ValueError(f"Expected output is iteration of `{output_type}` type, got `{output}`")
+     return output
+
+   def predict(self,
+               input_data: Union[List[np.ndarray], Dict[str, List[np.ndarray]]],
+               inference_parameters: Dict[str, Union[bool, str, float, int]] = {}) -> Iterable:
+     """
+     Prediction method.
+
+     Args:
+     -----
+     - input_data: A list of input data items to predict on. The type depends on the model input type:
+       * `image`: List[np.ndarray]
+       * `text`: List[str]
+       * `multimodal`:
+         input_data is a list of dicts where the key is the input type name, i.e. `image`, `text`, and the value is a list.
+         {"image": List[np.ndarray], "text": List[str]}
+
+     - inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters.
+
+     Returns:
+     --------
+       List of one of the `clarifai.models.model_serving.model_config.output` types. Refer to the README/docs.
+     """
+     raise NotImplementedError
+
+   def _tritonserver_predict(self,
+                             input_data,
+                             inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
+     """This method is invoked within tritonserver, specifically in the model.py of the Python backend. Attempting to execute it outside of the triton environment will result in failure."""
+     raise NotImplementedError
+
+
+ _MultiModalInputTypeDict = TypedDict("_MultiModalInputTypeDict", {
+     IMAGE_TENSOR_NAME: np.ndarray,
+     TEXT_TENSOR_NAME: str
+ })
+
+
+ class MultiModalEmbedder(_BaseClarifaiModel):
+   _config: ModelConfigClass = get_model_config(ModelTypes.multimodal_embedder)
+
+   def predict(self,
+               input_data: List[_MultiModalInputTypeDict],
+               inference_parameters: Dict[str, Union[bool, str, float, int]] = {}
+              ) -> Iterable[EmbeddingOutput]:
+     """Custom prediction function for the `multimodal-embedder` model.
+
+     Args:
+       input_data (List[_MultiModalInputTypeDict]): List of dicts with keys `image` (np.ndarray) and `text` (str)
+       inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters
+
+     Returns:
+       list of EmbeddingOutput
+     """
+     raise NotImplementedError
+
+   @triton_wrapper.multimodal_embedder
+   def _tritonserver_predict(self,
+                             input_data,
+                             inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
+     """This method is invoked within tritonserver, specifically in the model.py of the Python backend. Attempting to execute it outside of the triton environment will result in failure."""
+     return self.predict(input_data, inference_parameters=inference_parameters)
+
+
+ class TextClassifier(_BaseClarifaiModel):
+   _config: ModelConfigClass = get_model_config(ModelTypes.text_classifier)
+
+   def predict(self,
+               input_data: List[str],
+               inference_parameters: Dict[str, Union[bool, str, float, int]] = {}
+              ) -> Iterable[ClassifierOutput]:
+     """Custom prediction function for the `text-classifier` model.
+
+     Args:
+       input_data (List[str]): List of text
+       inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters
+
+     Returns:
+       list of ClassifierOutput
+     """
+     raise NotImplementedError
+
+   @triton_wrapper.text_classifier
+   def _tritonserver_predict(self,
+                             input_data,
+                             inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
+     return self.predict(input_data, inference_parameters=inference_parameters)
+
+
+ class TextEmbedder(_BaseClarifaiModel):
+   _config: ModelConfigClass = get_model_config(ModelTypes.text_embedder)
+
+   def predict(self,
+               input_data: List[str],
+               inference_parameters: Dict[str, Union[bool, str, float, int]] = {}
+              ) -> Iterable[EmbeddingOutput]:
+     """Custom prediction function for the `text-embedder` model.
+
+     Args:
+       input_data (List[str]): List of text
+       inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters
+
+     Returns:
+       list of EmbeddingOutput
+     """
+     raise NotImplementedError
+
+   @triton_wrapper.text_embedder
+   def _tritonserver_predict(self,
+                             input_data,
+                             inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
+     return self.predict(input_data, inference_parameters=inference_parameters)
+
+
+ class TextToImage(_BaseClarifaiModel):
+   _config: ModelConfigClass = get_model_config(ModelTypes.text_to_image)
+
+   def predict(
+       self,
+       input_data: List[str],
+       inference_parameters: Dict[str, Union[bool, str, float, int]] = {}) -> Iterable[ImageOutput]:
+     """Custom prediction function for the `text-to-image` model.
+
+     Args:
+       input_data (List[str]): List of text
+       inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters
+
+     Returns:
+       list of ImageOutput
+     """
+     raise NotImplementedError
+
+   @triton_wrapper.text_to_image
+   def _tritonserver_predict(self,
+                             input_data,
+                             inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
+     return self.predict(input_data, inference_parameters=inference_parameters)
+
+
+ class TextToText(_BaseClarifaiModel):
+   _config: ModelConfigClass = get_model_config(ModelTypes.text_to_text)
+
+   def predict(
+       self,
+       input_data: List[str],
+       inference_parameters: Dict[str, Union[bool, str, float, int]] = {}) -> Iterable[TextOutput]:
+     """Custom prediction function for the `text-to-text` (also called `text generation`) model.
+
+     Args:
+       input_data (List[str]): List of text
+       inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters
+
+     Returns:
+       list of TextOutput
+     """
+     raise NotImplementedError
+
+   @triton_wrapper.text_to_text
+   def _tritonserver_predict(self,
+                             input_data,
+                             inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
+     """This method is invoked within tritonserver, specifically in the model.py of the Python backend. Attempting to execute it outside of the triton environment will result in failure."""
+     return self.predict(input_data, inference_parameters=inference_parameters)
+
+
+ class VisualClassifier(_BaseClarifaiModel):
+   _config: ModelConfigClass = get_model_config(ModelTypes.visual_classifier)
+
+   def predict(self,
+               input_data: List[np.ndarray],
+               inference_parameters: Dict[str, Union[bool, str, float, int]] = {}
+              ) -> Iterable[ClassifierOutput]:
+     """Custom prediction function for the `visual-classifier` model.
+
+     Args:
+       input_data (List[np.ndarray]): List of images
+       inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters
+
+     Returns:
+       list of ClassifierOutput
+     """
+     raise NotImplementedError
+
+   @triton_wrapper.visual_classifier
+   def _tritonserver_predict(self,
+                             input_data,
+                             inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
+     """This method is invoked within tritonserver, specifically in the model.py of the Python backend. Attempting to execute it outside of the triton environment will result in failure."""
+     return self.predict(input_data, inference_parameters=inference_parameters)
+
+
+ class VisualDetector(_BaseClarifaiModel):
+   _config: ModelConfigClass = get_model_config(ModelTypes.visual_detector)
+
+   def predict(self,
+               input_data: List[np.ndarray],
+               inference_parameters: Dict[str, Union[bool, str, float, int]] = {}
+              ) -> Iterable[VisualDetectorOutput]:
+     """Custom prediction function for the `visual-detector` model.
+
+     Args:
+       input_data (List[np.ndarray]): List of images
+       inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters
+
+     Returns:
+       list of VisualDetectorOutput
+     """
+     raise NotImplementedError
+
+   @triton_wrapper.visual_detector
+   def _tritonserver_predict(self,
+                             input_data,
+                             inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
+     """This method is invoked within tritonserver, specifically in the model.py of the Python backend. Attempting to execute it outside of the triton environment will result in failure."""
+     return self.predict(input_data, inference_parameters=inference_parameters)
+
+   @staticmethod
+   def postprocess(width: int,
+                   height: int,
+                   labels: list,
+                   scores: list,
+                   xyxy_boxes: list,
+                   max_bbox_count: int = 500) -> VisualDetectorOutput:
+     """Convert detection output to the Clarifai detector output format.
+
+     Args:
+       width (int): image width
+       height (int): image height
+       labels (list): list of labels
+       scores (list): list of scores
+       xyxy_boxes (list): list of bounding boxes in x_min, y_min, x_max, y_max format
+       max_bbox_count (int, optional): maximum number of detections. Defaults to 500.
+
+     Returns:
+       VisualDetectorOutput
+     """
+     assert len(labels) == len(scores) == len(
+         xyxy_boxes
+     ), f"Length of `labels`, `scores` and `bboxes` must be equal, got {len(labels)}, {len(scores)} and {len(xyxy_boxes)} "
+     labels = [[each] for each in labels]
+     scores = [[each] for each in scores]
+     bboxes = [[x[1] / height, x[0] / width, x[3] / height, x[2] / width]
+               for x in xyxy_boxes]  # normalize the bboxes to [0,1] and [y1 x1 y2 x2]
+     bboxes = np.clip(bboxes, 0, 1.)
+     if len(bboxes) != 0:
+       bboxes = np.concatenate((bboxes, np.zeros((max_bbox_count - len(bboxes), 4))))
+       scores = np.concatenate((scores, np.zeros((max_bbox_count - len(scores), 1))))
+       labels = np.concatenate((labels, np.zeros((max_bbox_count - len(labels), 1),
+                                                 dtype=np.int32)))
+     else:
+       bboxes = np.zeros((max_bbox_count, 4), dtype=np.float32)
+       scores = np.zeros((max_bbox_count, 1), dtype=np.float32)
+       labels = np.zeros((max_bbox_count, 1), dtype=np.int32)
+
+     output = VisualDetectorOutput(
+         predicted_bboxes=bboxes, predicted_labels=labels, predicted_scores=scores)
+
+     return output
+
+
+ class VisualEmbedder(_BaseClarifaiModel):
+   _config: ModelConfigClass = get_model_config(ModelTypes.visual_embedder)
+
+   def predict(self,
+               input_data: List[np.ndarray],
+               inference_parameters: Dict[str, Union[bool, str, float, int]] = {}
+              ) -> Iterable[EmbeddingOutput]:
+     """Custom prediction function for the `visual-embedder` model.
+
+     Args:
+       input_data (List[np.ndarray]): List of images
+       inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters
+
+     Returns:
+       list of EmbeddingOutput
+     """
+     raise NotImplementedError
+
+   @triton_wrapper.visual_embedder
+   def _tritonserver_predict(self,
+                             input_data,
+                             inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
+     """This method is invoked within tritonserver, specifically in the model.py of the Python backend. Attempting to execute it outside of the triton environment will result in failure."""
+     return self.predict(input_data, inference_parameters=inference_parameters)
+
+
+ class VisualSegmenter(_BaseClarifaiModel):
+   _config: ModelConfigClass = get_model_config(ModelTypes.visual_segmenter)
+
+   def predict(
+       self,
+       input_data: List[np.ndarray],
+       inference_parameters: Dict[str, Union[bool, str, float, int]] = {}) -> Iterable[MasksOutput]:
+     """Custom prediction function for the `visual-segmenter` model.
+
+     Args:
+       input_data (List[np.ndarray]): List of images
+       inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters
+
+     Returns:
+       list of MasksOutput
+     """
+     raise NotImplementedError
+
+   @triton_wrapper.visual_segmenter
+   def _tritonserver_predict(self,
+                             input_data,
+                             inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
+     """This method is invoked within tritonserver, specifically in the model.py of the Python backend. Attempting to execute it outside of the triton environment will result in failure."""
+     return self.predict(input_data, inference_parameters=inference_parameters)
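
Taken together, `_TypeCheckModelOutput` means any subclass's `predict` must return an iterable of the configured output type, with one element per input. A minimal sketch of a conforming subclass; the `embedding_vector` field name on `EmbeddingOutput` is an assumption for illustration:

```python
import numpy as np
from clarifai.models.model_serving.model_config import EmbeddingOutput, TextEmbedder

class MyEmbedder(TextEmbedder):

  def predict(self, input_data, inference_parameters={}):
    # One EmbeddingOutput per input text; returning the wrong type or a
    # mismatched length trips the assertions in _output_type_check.
    return [
        EmbeddingOutput(embedding_vector=np.zeros(512, dtype=np.float32))
        for _ in input_data
    ]
```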