clarifai 10.8.4__py3-none-any.whl → 10.8.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. clarifai/__init__.py +1 -1
  2. clarifai/client/dataset.py +9 -3
  3. clarifai/constants/dataset.py +1 -1
  4. clarifai/datasets/upload/base.py +6 -3
  5. clarifai/datasets/upload/features.py +10 -0
  6. clarifai/datasets/upload/image.py +22 -13
  7. clarifai/datasets/upload/multimodal.py +70 -0
  8. clarifai/datasets/upload/text.py +8 -5
  9. clarifai/utils/misc.py +6 -0
  10. {clarifai-10.8.4.dist-info → clarifai-10.8.5.dist-info}/METADATA +2 -1
  11. {clarifai-10.8.4.dist-info → clarifai-10.8.5.dist-info}/RECORD +15 -58
  12. clarifai/models/model_serving/README.md +0 -158
  13. clarifai/models/model_serving/__init__.py +0 -14
  14. clarifai/models/model_serving/cli/__init__.py +0 -12
  15. clarifai/models/model_serving/cli/_utils.py +0 -53
  16. clarifai/models/model_serving/cli/base.py +0 -14
  17. clarifai/models/model_serving/cli/build.py +0 -79
  18. clarifai/models/model_serving/cli/clarifai_clis.py +0 -33
  19. clarifai/models/model_serving/cli/create.py +0 -171
  20. clarifai/models/model_serving/cli/example_cli.py +0 -34
  21. clarifai/models/model_serving/cli/login.py +0 -26
  22. clarifai/models/model_serving/cli/upload.py +0 -183
  23. clarifai/models/model_serving/constants.py +0 -21
  24. clarifai/models/model_serving/docs/cli.md +0 -161
  25. clarifai/models/model_serving/docs/concepts.md +0 -229
  26. clarifai/models/model_serving/docs/dependencies.md +0 -11
  27. clarifai/models/model_serving/docs/inference_parameters.md +0 -139
  28. clarifai/models/model_serving/docs/model_types.md +0 -19
  29. clarifai/models/model_serving/model_config/__init__.py +0 -16
  30. clarifai/models/model_serving/model_config/base.py +0 -369
  31. clarifai/models/model_serving/model_config/config.py +0 -312
  32. clarifai/models/model_serving/model_config/inference_parameter.py +0 -129
  33. clarifai/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +0 -25
  34. clarifai/models/model_serving/model_config/model_types_config/text-classifier.yaml +0 -19
  35. clarifai/models/model_serving/model_config/model_types_config/text-embedder.yaml +0 -20
  36. clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml +0 -19
  37. clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml +0 -19
  38. clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml +0 -22
  39. clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml +0 -32
  40. clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml +0 -19
  41. clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +0 -19
  42. clarifai/models/model_serving/model_config/output.py +0 -133
  43. clarifai/models/model_serving/model_config/triton/__init__.py +0 -14
  44. clarifai/models/model_serving/model_config/triton/serializer.py +0 -136
  45. clarifai/models/model_serving/model_config/triton/triton_config.py +0 -182
  46. clarifai/models/model_serving/model_config/triton/wrappers.py +0 -281
  47. clarifai/models/model_serving/repo_build/__init__.py +0 -14
  48. clarifai/models/model_serving/repo_build/build.py +0 -198
  49. clarifai/models/model_serving/repo_build/static_files/_requirements.txt +0 -2
  50. clarifai/models/model_serving/repo_build/static_files/base_test.py +0 -169
  51. clarifai/models/model_serving/repo_build/static_files/inference.py +0 -26
  52. clarifai/models/model_serving/repo_build/static_files/sample_clarifai_config.yaml +0 -25
  53. clarifai/models/model_serving/repo_build/static_files/test.py +0 -40
  54. clarifai/models/model_serving/repo_build/static_files/triton/model.py +0 -75
  55. clarifai/models/model_serving/utils.py +0 -31
  56. {clarifai-10.8.4.dist-info → clarifai-10.8.5.dist-info}/LICENSE +0 -0
  57. {clarifai-10.8.4.dist-info → clarifai-10.8.5.dist-info}/WHEEL +0 -0
  58. {clarifai-10.8.4.dist-info → clarifai-10.8.5.dist-info}/entry_points.txt +0 -0
  59. {clarifai-10.8.4.dist-info → clarifai-10.8.5.dist-info}/top_level.txt +0 -0
clarifai/models/model_serving/model_config/base.py
@@ -1,369 +0,0 @@
- from typing import Dict, Iterable, List, TypedDict, Union
-
- import numpy as np
-
- from ..constants import IMAGE_TENSOR_NAME, TEXT_TENSOR_NAME
- from .config import ModelConfigClass, ModelTypes, get_model_config
- from .output import (ClassifierOutput, EmbeddingOutput, ImageOutput, MasksOutput, TextOutput,
-                      VisualDetectorOutput)
- from .triton import wrappers as triton_wrapper
-
-
- class _TypeCheckModelOutput(type):
-
-   def __new__(cls, name, bases, attrs):
-     """
-     Override child `predict` function with parent._output_type_check(child.predict).
-     Aim to check if child.predict returns valid output type
-     """
-
-     def wrap_function(fn_name, base, base_fn, other_fn):
-
-       def new_fn(_self,
-                  input_data,
-                  inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
-         # Run child class
-         out = other_fn(_self, input_data, inference_parameters=inference_parameters)
-         # Run type check
-         return base_fn(base, input_data, out)
-
-       new_fn.__name__ = "wrapped_%s" % fn_name
-       new_fn.__doc__ = other_fn.__doc__
-       return new_fn
-
-     if name != "_BaseClarifaiModel":
-       attrs["predict"] = wrap_function("predict", bases[0],
-                                        getattr(bases[0], "_output_type_check", lambda: None),
-                                        attrs.setdefault("predict", lambda: None))
-
-     return type.__new__(cls, name, bases, attrs)
-
-
- class _BaseClarifaiModel(metaclass=_TypeCheckModelOutput):
-   _config: ModelConfigClass = None
-
-   @property
-   def config(self):
-     return self._config
-
-   def _output_type_check(self, input, output):
-     output_type = self._config.clarifai_model.output_type
-     if isinstance(output, Iterable):
-       assert all(
-           each.__class__.__name__ == output_type for each in output
-       ), f"Expected output is iteration of `{output_type}` type, got iteration `{output}`"
-       assert len(output) == len(
-           input
-       ), f"Input length and output length must be equal, but got input length of {len(input)} and output length of {len(output)}"
-     else:
-       raise ValueError(f"Expected output is iteration of `{output_type}` type, got `{output}`")
-     return output
-
-   def predict(self,
-               input_data: Union[List[np.ndarray], Dict[str, List[np.ndarray]]],
-               inference_parameters: Dict[str, Union[bool, str, float, int]] = {}) -> Iterable:
-     """
-     Prediction method.
-
-     Args:
-     -----
-     - input_data: A list of input data item to predict on. The type depends on model input type:
-       * `image`: List[np.ndarray]
-       * `text`: List[str]
-       * `multimodal`:
-         input_data is list of dict where key is input type name e.i. `image`, `text` and value is list.
-         {"image": List[np.ndarray], "text": List[str]}
-
-     - inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters.
-
-     Returns:
-     --------
-       List of one of the `clarifai.models.model_serving.model_config.output` types. Refer to the README/docs
-     """
-     raise NotImplementedError
-
-   def _tritonserver_predict(self,
-                             input_data,
-                             inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
-     """ This method is invoked within tritonserver, specifically in the model.py of the Python backend. Attempting to execute it outside of the triton environment will result in failure."""
-     raise NotImplementedError
-
-
- _MultiModalInputTypeDict = TypedDict("_MultiModalInputTypeDict", {
-     IMAGE_TENSOR_NAME: np.ndarray,
-     TEXT_TENSOR_NAME: str
- })
-
-
- class MultiModalEmbedder(_BaseClarifaiModel):
-   _config: ModelConfigClass = get_model_config(ModelTypes.multimodal_embedder)
-
-   def predict(self,
-               input_data: List[_MultiModalInputTypeDict],
-               inference_parameters: Dict[str, Union[bool, str, float, int]] = {}
-              ) -> Iterable[EmbeddingOutput]:
-     """ Custom prediction function for `multimodal-embedder` model.
-
-     Args:
-       input_data (List[_MultiModalInputTypeDict]): List of dict of key-value: `image`(np.ndarray) and `text` (str)
-       inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters
-
-     Returns:
-       list of EmbeddingOutput
-     """
-     raise NotImplementedError
-
-   @triton_wrapper.multimodal_embedder
-   def _tritonserver_predict(self,
-                             input_data,
-                             inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
-     """ This method is invoked within tritonserver, specifically in the model.py of the Python backend. Attempting to execute it outside of the triton environment will result in failure."""
-     return self.predict(input_data, inference_parameters=inference_parameters)
-
-
- class TextClassifier(_BaseClarifaiModel):
-   _config: ModelConfigClass = get_model_config(ModelTypes.text_classifier)
-
-   def predict(self,
-               input_data: List[str],
-               inference_parameters: Dict[str, Union[bool, str, float, int]] = {}
-              ) -> Iterable[ClassifierOutput]:
-     """ Custom prediction function for `text-classifier` model.
-
-     Args:
-       input_data (List[str]): List of text
-       inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters
-
-     Returns:
-       list of ClassifierOutput
-     """
-     raise NotImplementedError
-
-   @triton_wrapper.text_classifier
-   def _tritonserver_predict(self,
-                             input_data,
-                             inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
-     return self.predict(input_data, inference_parameters=inference_parameters)
-
-
- class TextEmbedder(_BaseClarifaiModel):
-   _config: ModelConfigClass = get_model_config(ModelTypes.text_embedder)
-
-   def predict(self,
-               input_data: List[str],
-               inference_parameters: Dict[str, Union[bool, str, float, int]] = {}
-              ) -> Iterable[EmbeddingOutput]:
-     """ Custom prediction function for `text-embedder` model.
-
-     Args:
-       input_data (List[str]): List of text
-       inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters
-
-     Returns:
-       list of EmbeddingOutput
-     """
-     raise NotImplementedError
-
-   @triton_wrapper.text_embedder
-   def _tritonserver_predict(self,
-                             input_data,
-                             inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
-     return self.predict(input_data, inference_parameters=inference_parameters)
-
-
- class TextToImage(_BaseClarifaiModel):
-   _config: ModelConfigClass = get_model_config(ModelTypes.text_to_image)
-
-   def predict(
-       self,
-       input_data: List[str],
-       inference_parameters: Dict[str, Union[bool, str, float, int]] = {}) -> Iterable[ImageOutput]:
-     """ Custom prediction function for `text-to-image` model.
-
-     Args:
-       input_data (List[str]): List of text
-       inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters
-
-     Returns:
-       list of ImageOutput
-     """
-     raise NotImplementedError
-
-   @triton_wrapper.text_to_image
-   def _tritonserver_predict(self,
-                             input_data,
-                             inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
-     return self.predict(input_data, inference_parameters=inference_parameters)
-
-
- class TextToText(_BaseClarifaiModel):
-   _config: ModelConfigClass = get_model_config(ModelTypes.text_to_text)
-
-   def predict(
-       self,
-       input_data: List[str],
-       inference_parameters: Dict[str, Union[bool, str, float, int]] = {}) -> Iterable[TextOutput]:
-     """ Custom prediction function for `text-to-text` (also called as `text generation`) model.
-
-     Args:
-       input_data (List[str]): List of text
-       inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters
-
-     Returns:
-       list of TextOutput
-     """
-     raise NotImplementedError
-
-   @triton_wrapper.text_to_text
-   def _tritonserver_predict(self,
-                             input_data,
-                             inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
-     """ This method is invoked within tritonserver, specifically in the model.py of the Python backend. Attempting to execute it outside of the triton environment will result in failure."""
-     return self.predict(input_data, inference_parameters=inference_parameters)
-
-
- class VisualClassifier(_BaseClarifaiModel):
-   _config: ModelConfigClass = get_model_config(ModelTypes.visual_classifier)
-
-   def predict(self,
-               input_data: List[np.ndarray],
-               inference_parameters: Dict[str, Union[bool, str, float, int]] = {}
-              ) -> Iterable[ClassifierOutput]:
-     """ Custom prediction function for `visual-classifier` model.
-
-     Args:
-       input_data (List[np.ndarray]): List of image
-       inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters
-
-     Returns:
-       list of ClassifierOutput
-     """
-     raise NotImplementedError
-
-   @triton_wrapper.visual_classifier
-   def _tritonserver_predict(self,
-                             input_data,
-                             inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
-     """ This method is invoked within tritonserver, specifically in the model.py of the Python backend. Attempting to execute it outside of the triton environment will result in failure."""
-     return self.predict(input_data, inference_parameters=inference_parameters)
-
-
- class VisualDetector(_BaseClarifaiModel):
-   _config: ModelConfigClass = get_model_config(ModelTypes.visual_detector)
-
-   def predict(self,
-               input_data: List[np.ndarray],
-               inference_parameters: Dict[str, Union[bool, str, float, int]] = {}
-              ) -> Iterable[VisualDetectorOutput]:
-     """ Custom prediction function for `visual-detector` model.
-
-     Args:
-       input_data (List[np.ndarray]): List of image
-       inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters
-
-     Returns:
-       list of VisualDetectorOutput
-     """
-     raise NotImplementedError
-
-   @triton_wrapper.visual_detector
-   def _tritonserver_predict(self,
-                             input_data,
-                             inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
-     """ This method is invoked within tritonserver, specifically in the model.py of the Python backend. Attempting to execute it outside of the triton environment will result in failure."""
-     return self.predict(input_data, inference_parameters=inference_parameters)
-
-   @staticmethod
-   def postprocess(width: int,
-                   height: int,
-                   labels: list,
-                   scores: list,
-                   xyxy_boxes: list,
-                   max_bbox_count: int = 500) -> VisualDetectorOutput:
-     """Convert detection output to Clarifai detector output format
-
-     Args:
-       width (int): image width
-       height (int): image height
-       labels (list): list of labels
-       scores (list): list of scores
-       xyxy_boxes (list): list of bounding boxes in x_min, y_min, x_max, y_max format
-       max_bbox_count (int, optional): Maximum detection result. Defaults to 500.
-
-     Returns:
-       VisualDetectorOutput
-     """
-     assert len(labels) == len(scores) == len(
-         xyxy_boxes
-     ), f"Length of `labels`, `scores` and `bboxes` must be equal, got {len(labels)}, {len(scores)} and {len(xyxy_boxes)} "
-     labels = [[each] for each in labels]
-     scores = [[each] for each in scores]
-     bboxes = [[x[1] / height, x[0] / width, x[3] / height, x[2] / width]
-               for x in xyxy_boxes]  # normalize the bboxes to [0,1] and [y1 x1 y2 x2]
-     bboxes = np.clip(bboxes, 0, 1.)
-     if len(bboxes) != 0:
-       bboxes = np.concatenate((bboxes, np.zeros((max_bbox_count - len(bboxes), 4))))
-       scores = np.concatenate((scores, np.zeros((max_bbox_count - len(scores), 1))))
-       labels = np.concatenate((labels, np.zeros((max_bbox_count - len(labels), 1),
-                                                 dtype=np.int32)))
-     else:
-       bboxes = np.zeros((max_bbox_count, 4), dtype=np.float32)
-       scores = np.zeros((max_bbox_count, 1), dtype=np.float32)
-       labels = np.zeros((max_bbox_count, 1), dtype=np.int32)
-
-     output = VisualDetectorOutput(
-         predicted_bboxes=bboxes, predicted_labels=labels, predicted_scores=scores)
-
-     return output
-
-
- class VisualEmbedder(_BaseClarifaiModel):
-   _config: ModelConfigClass = get_model_config(ModelTypes.visual_embedder)
-
-   def predict(self,
-               input_data: List[np.ndarray],
-               inference_parameters: Dict[str, Union[bool, str, float, int]] = {}
-              ) -> Iterable[EmbeddingOutput]:
-     """ Custom prediction function for `visual-embedder` model.
-
-     Args:
-       input_data (List[np.ndarray]): List of image
-       inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters
-
-     Returns:
-       list of EmbeddingOutput
-     """
-     raise NotImplementedError
-
-   @triton_wrapper.visual_embedder
-   def _tritonserver_predict(self,
-                             input_data,
-                             inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
-     """ This method is invoked within tritonserver, specifically in the model.py of the Python backend. Attempting to execute it outside of the triton environment will result in failure."""
-     return self.predict(input_data, inference_parameters=inference_parameters)
-
-
- class VisualSegmenter(_BaseClarifaiModel):
-   _config: ModelConfigClass = get_model_config(ModelTypes.visual_segmenter)
-
-   def predict(
-       self,
-       input_data: List[np.ndarray],
-       inference_parameters: Dict[str, Union[bool, str, float, int]] = {}) -> Iterable[MasksOutput]:
-     """ Custom prediction function for `visual-segmenter` model.
-
-     Args:
-       input_data (List[np.ndarray]): List of image
-       inference_parameters (Dict[str, Union[bool, str, float, int]]): your inference parameters
-
-     Returns:
-       list of MasksOutput
-     """
-     raise NotImplementedError
-
-   @triton_wrapper.visual_segmenter
-   def _tritonserver_predict(self,
-                             input_data,
-                             inference_parameters: Dict[str, Union[bool, str, float, int]] = {}):
-     """ This method is invoked within tritonserver, specifically in the model.py of the Python backend. Attempting to execute it outside of the triton environment will result in failure."""
-     return self.predict(input_data, inference_parameters=inference_parameters)
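
The deleted base.py above defined one abstract class per Clarifai model type, and its metaclass wraps every subclass's `predict` so that it must return an iterable of the configured output type with the same length as the input batch. As a rough illustration of how a serving model was typically written against this now-removed API (only meaningful with clarifai <= 10.8.4 installed; the `InferenceModel` name and the hard-coded detection results are placeholders, not part of the package):

# Sketch of a user model targeting the removed serving API (clarifai <= 10.8.4).
import numpy as np

from clarifai.models.model_serving.model_config.base import VisualDetector


class InferenceModel(VisualDetector):
  """Hypothetical detector; the results below are hard-coded placeholders."""

  def predict(self, input_data, inference_parameters={}):
    outputs = []
    for image in input_data:  # each image is an np.ndarray of shape (H, W, 3)
      h, w = image.shape[:2]
      labels, scores, boxes = [0], [0.9], [[10, 20, 100, 200]]  # x_min, y_min, x_max, y_max
      # postprocess() normalizes the boxes to [0, 1], pads to max_bbox_count and
      # returns the VisualDetectorOutput that _output_type_check expects.
      outputs.append(
          VisualDetector.postprocess(
              width=w, height=h, labels=labels, scores=scores, xyxy_boxes=boxes))
    return outputs  # one output per input, same length as input_data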
clarifai/models/model_serving/model_config/config.py
@@ -1,312 +0,0 @@
- import logging
- from copy import deepcopy
- from dataclasses import asdict, dataclass, field
- from typing import Any, List
-
- import yaml
-
- from .inference_parameter import InferParam
- from .output import *  # noqa: F403
- from .triton import DType  # noqa
- from .triton import Device, DynamicBatching, InputConfig, OutputConfig, TritonModelConfig
-
- logger = logging.getLogger(__name__)
-
- __all__ = ["ModelTypes", "ModelConfigClass", "MODEL_TYPES", "get_model_config", "load_user_config"]
-
-
- # Clarifai model types
- @dataclass
- class ModelTypes:
-   """ All supported Clarifai model type names
-   """
-   visual_detector: str = "visual-detector"
-   visual_classifier: str = "visual-classifier"
-   text_classifier: str = "text-classifier"
-   text_to_text: str = "text-to-text"
-   text_embedder: str = "text-embedder"
-   text_to_image: str = "text-to-image"
-   visual_embedder: str = "visual-embedder"
-   visual_segmenter: str = "visual-segmenter"
-   multimodal_embedder: str = "multimodal-embedder"
-
-   def __post_init__(self):
-     self.all = list(asdict(self).values())
-
-   @property
-   def image_input_models(self):
-     """ Return list of model types having image as input or one of inputs"""
-     _visual = [each for each in self.all if each.startswith("visual")]
-
-     return _visual + [self.multimodal_embedder]
-
-
- @dataclass
- class FieldMapsConfig:
-   input_fields_map: dict = field(default_factory=dict)
-   output_fields_map: dict = field(default_factory=dict)
-
-
- @dataclass
- class ServingBackendConfig:
-   """
-   """
-   triton: TritonModelConfig = None
-
-
- @dataclass
- class ClarifaiModelConfig:
-   """Clarifai necessary configs for building/uploading/creation
-
-   Args:
-     field_maps (FieldMapsConfig): Field maps config
-     output_type (dataclass): model output type
-     labels (List[str]): list of concept names
-     inference_parameters (List[InferParam]): list of inference parameters
-     clarifai_model_id (str): Clarifai model id on the platform
-     type (str): one of `MODEL_TYPES`
-     clarifai_user_app_id (str): User ID and App ID separated by '/', e.g., <user_id>/<app_id>
-     description (str): model description
-
-   """
-   field_maps: FieldMapsConfig = None
-   output_type: str = None
-   labels: List[str] = field(default_factory=list)
-   inference_parameters: List[InferParam] = field(default_factory=list)
-   clarifai_model_id: str = ""
-   type: str = ""
-   clarifai_user_app_id: str = ""
-   description: str = ""
-
-   def _checking(self, var_name: str, var_value: Any):
-     if var_name == "type":
-       _model_types = MODEL_TYPES + [""]
-       assert self.type in _model_types
-     elif var_name == "clarifai_model_id" and var_value:
-       # TODO: Must ensure name is valid
-       pass
-     elif var_name == "clarifai_user_app_id" and var_value:
-       _user_app = var_value.split("/")
-       assert len(_user_app) == 2, ValueError(
-           f"id must be combination of user_id and app_id separated by `/`, e.g. <user_id>/<app_id>. Got {var_value}"
-       )
-     elif var_name == "labels":
-       if var_value:
-         assert isinstance(var_value, tuple) or isinstance(
-             var_value, list), f"labels must be tuple or list, got {type(var_value)}"
-         var_value = [str(each) for each in var_value]
-
-     return var_value
-
-   def __setattr__(self, __name: str, __value: Any) -> None:
-     __value = self._checking(__name, __value)
-
-     super().__setattr__(__name, __value)
-
-
- @dataclass
- class ModelConfigClass():
-   """All config of model
-   Args:
-     clarifai_model (ClarifaiModelConfig): Clarifai model config
-     serving_backend (ServingBackendConfig): Custom serving backend config. Only support triton for now
-   """
-   clarifai_model: ClarifaiModelConfig
-   serving_backend: ServingBackendConfig
-
-   def make_triton_model_config(
-       self,
-       model_name: str,
-       model_version: str,
-       image_shape: List = None,
-       instance_group: Device = Device(),
-       dynamic_batching: DynamicBatching = DynamicBatching(),
-       max_batch_size: int = 1,
-       backend: str = "python",
-   ) -> TritonModelConfig:
-
-     return TritonModelConfig(
-         model_name=model_name,
-         model_version=model_version,
-         image_shape=image_shape,
-         instance_group=instance_group,
-         dynamic_batching=dynamic_batching,
-         max_batch_size=max_batch_size,
-         backend=backend,
-         input=self.serving_backend.triton.input,
-         output=self.serving_backend.triton.output)
-
-   def dump_to_user_config(self):
-     data = asdict(self)
-     _self = deepcopy(self)
-     # dump backend
-     if hasattr(_self.serving_backend, "triton"):
-       dict_triton_config = asdict(_self.serving_backend.triton)
-       for k, v in dict_triton_config.items():
-         if (k == "max_batch_size" and v > 1) \
-             or (k == "image_shape" and v != [-1, -1] and self.clarifai_model.type in ModelTypes().image_input_models):
-           continue
-         else:
-           data["serving_backend"]["triton"].pop(k, None)
-
-     if not data["serving_backend"]["triton"]:
-       data["serving_backend"].pop("triton", None)
-     if not data["serving_backend"]:
-       data.pop("serving_backend", None)
-
-     # dump clarifai model
-     data["clarifai_model"].pop("field_maps", None)
-     data["clarifai_model"].pop("output_type", None)
-
-     return data
-
-   @classmethod
-   def custom_doc(cls):
-     msg = f"{cls.__doc__}\nWhere: \n\n"
-     for k, v in cls.__annotations__.items():
-       msg += f"* {k}:\n------\n {v.__doc__}\n"
-     return msg
-
-
- def read_yaml(path: str) -> dict:
-   with open(path, encoding="utf-8") as f:
-     config = yaml.safe_load(f)  # model dict
-   return config
-
-
- def parse_config(config: dict):
-   clarifai_model = config.get("clarifai_model", {})
-   serving_backend = config.get("serving_backend", {})
-   if serving_backend:
-     if serving_backend.get("triton", {}):
-       # parse triton input/output
-       triton = serving_backend["triton"]
-       input_triton_configs = triton.pop("input", {})
-       triton.update(
-           dict(input=[
-               InputConfig(
-                   name=input["name"],
-                   data_type=eval(f"DType.{input['data_type']}") if isinstance(
-                       input['data_type'], str) else input['data_type'],
-                   dims=input["dims"],
-                   optional=input.get("optional", False),
-               ) for input in input_triton_configs
-           ]))
-       output_triton_configs = triton.pop("output", {})
-       triton.update(
-           dict(output=[
-               OutputConfig(
-                   name=output["name"],
-                   data_type=eval(f"DType.{output['data_type']}") if isinstance(
-                       output['data_type'], str) else output['data_type'],
-                   dims=output["dims"],
-                   label_filename=output["label_filename"],
-               ) for output in output_triton_configs
-           ]))
-       serving_backend.update(dict(triton=TritonModelConfig(**triton)))
-     serving_backend = ServingBackendConfig(**serving_backend)
-
-   # parse field maps for deployment
-   field_maps = clarifai_model.pop("field_maps", {})
-   clarifai_model.update(dict(field_maps=FieldMapsConfig(**field_maps)))
-   # parse inference_parameters
-   inference_parameters = clarifai_model.pop("inference_parameters", [])
-   if inference_parameters is None:
-     inference_parameters = []
-   clarifai_model.update(
-       dict(inference_parameters=[InferParam(**each) for each in inference_parameters]))
-   # parse output type
-   output_type = clarifai_model.pop("output_type", None)
-   if output_type:
-     #if isinstance(output_type, str):
-     #  output_type = eval(output_type)
-     clarifai_model.update(dict(output_type=output_type))
-
-   clarifai_model = ClarifaiModelConfig(**clarifai_model)
-
-   return ModelConfigClass(clarifai_model=clarifai_model, serving_backend=serving_backend)
-
-
- def get_model_config(model_type: str) -> ModelConfigClass:
-   """
-   Get model config by model type
-
-   Args:
-
-     model_type (str): One of field value of ModelTypes
-
-   Return:
-     ModelConfigClass
-
-   ### Example:
-   >>> from clarifai.models.model_serving.model_config import get_model_config, ModelTypes
-   >>> cfg = get_model_config(ModelTypes.text_classifier)
-   >>> custom_triton_config = cfg.make_triton_model_config(**kwargs)
-
-   """
-   if model_type == "MODEL_TYPE_PLACEHOLDER":
-     logger.warning(
-         "Warning: A placeholder value has been detected for obtaining the model configuration. This will result in empty `ModelConfigClass` object."
-     )
-     return ModelConfigClass(clarifai_model=None, serving_backend=None)
-
-   import os
-   assert model_type in MODEL_TYPES, f"`model_type` must be in {MODEL_TYPES}"
-   cfg = read_yaml(
-       os.path.join(os.path.dirname(__file__), "model_types_config", f"{model_type}.yaml"))
-   cfg = parse_config(cfg)
-   cfg.clarifai_model.type = model_type
-   return cfg
-
-
- _model_types = ModelTypes()
- MODEL_TYPES = _model_types.all
- del _model_types
-
-
- def load_user_config(cfg_path: str) -> ModelConfigClass:
-   """Read `clarifai_config.yaml` in user working dir
-
-   Args:
-     cfg_path (str): path to config
-
-   Returns:
-     ModelConfigClass
-   """
-   cfg = read_yaml(cfg_path)
-   return _ensure_user_config(cfg)
-
-
- def _ensure_user_config(user_config: dict) -> ModelConfigClass:
-   """Ensure user config with default one
-
-   Args:
-     user_config (dict): ModelConfigClass as dict
-
-   Raises:
-     e: Exception when loading user config
-
-   Returns:
-     ModelConfigClass
-   """
-
-   try:
-     user_config_obj: ModelConfigClass = parse_config(user_config)
-   except Exception as e:
-     raise e
-
-   default_config = get_model_config(user_config_obj.clarifai_model.type)
-
-   for _model_cfg, value in asdict(user_config_obj.clarifai_model).items():
-
-     if value and _model_cfg != "field_maps":
-       setattr(default_config.clarifai_model, _model_cfg, value)
-
-   if hasattr(user_config_obj, "serving_backend"):
-     if hasattr(user_config_obj.serving_backend, "triton"):
-       if user_config_obj.serving_backend.triton.max_batch_size > 1:
-         default_config.serving_backend.triton.max_batch_size = user_config_obj.serving_backend.triton.max_batch_size
-       if user_config_obj.serving_backend.triton.image_shape != [-1, -1]:
-         default_config.serving_backend.triton.image_shape = user_config_obj.serving_backend.triton.image_shape
-
-   return default_config
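
The deleted config.py above centered on `get_model_config(model_type)`, which reads the bundled model_types_config/<model_type>.yaml, parses it into a `ModelConfigClass`, and stamps `clarifai_model.type`. A brief sketch of the call pattern from its own docstring example (again only applicable with clarifai <= 10.8.4 installed; the model name and version strings below are placeholders):

from clarifai.models.model_serving.model_config import ModelTypes, get_model_config

# Load the default config shipped for a model type (here parsed from visual-detector.yaml).
cfg = get_model_config(ModelTypes.visual_detector)
print(cfg.clarifai_model.type)  # "visual-detector"

# Build a Triton model config that reuses the YAML-defined input/output tensors.
triton_cfg = cfg.make_triton_model_config(
    model_name="my-detector",  # placeholder
    model_version="1",         # placeholder
    max_batch_size=4)

# Reduced dict suitable for writing back to a user's clarifai_config.yaml.
user_cfg = cfg.dump_to_user_config()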