clarifai 9.8.1__py3-none-any.whl → 9.9.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (124)
  1. clarifai/client/app.py +115 -14
  2. clarifai/client/base.py +11 -4
  3. clarifai/client/dataset.py +8 -3
  4. clarifai/client/input.py +34 -28
  5. clarifai/client/model.py +71 -2
  6. clarifai/client/module.py +4 -2
  7. clarifai/client/runner.py +161 -0
  8. clarifai/client/search.py +173 -0
  9. clarifai/client/user.py +110 -4
  10. clarifai/client/workflow.py +27 -2
  11. clarifai/constants/search.py +2 -0
  12. clarifai/datasets/upload/loaders/xview_detection.py +1 -1
  13. clarifai/models/model_serving/README.md +3 -3
  14. clarifai/models/model_serving/cli/deploy_cli.py +2 -3
  15. clarifai/models/model_serving/cli/repository.py +3 -5
  16. clarifai/models/model_serving/constants.py +1 -5
  17. clarifai/models/model_serving/docs/custom_config.md +5 -6
  18. clarifai/models/model_serving/docs/dependencies.md +5 -10
  19. clarifai/models/model_serving/examples/image_classification/age_vit/requirements.txt +1 -0
  20. clarifai/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +1 -0
  21. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +1 -0
  22. clarifai/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +1 -0
  23. clarifai/models/model_serving/examples/visual_detection/yolov5x/requirements.txt +1 -1
  24. clarifai/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +1 -0
  25. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +1 -0
  26. clarifai/models/model_serving/model_config/__init__.py +2 -0
  27. clarifai/models/model_serving/model_config/config.py +298 -0
  28. clarifai/models/model_serving/model_config/model_types_config/text-classifier.yaml +18 -0
  29. clarifai/models/model_serving/model_config/model_types_config/text-embedder.yaml +18 -0
  30. clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml +18 -0
  31. clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml +18 -0
  32. clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml +18 -0
  33. clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml +28 -0
  34. clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml +18 -0
  35. clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +18 -0
  36. clarifai/models/model_serving/model_config/serializer.py +1 -1
  37. clarifai/models/model_serving/models/default_test.py +22 -21
  38. clarifai/models/model_serving/models/output.py +2 -2
  39. clarifai/models/model_serving/pb_model_repository.py +2 -5
  40. clarifai/runners/__init__.py +0 -0
  41. clarifai/runners/example.py +33 -0
  42. clarifai/schema/search.py +60 -0
  43. clarifai/utils/logging.py +53 -3
  44. clarifai/versions.py +1 -1
  45. clarifai/workflows/__init__.py +0 -0
  46. clarifai/workflows/export.py +68 -0
  47. clarifai/workflows/utils.py +59 -0
  48. clarifai/workflows/validate.py +67 -0
  49. {clarifai-9.8.1.dist-info → clarifai-9.9.0.dist-info}/METADATA +20 -2
  50. {clarifai-9.8.1.dist-info → clarifai-9.9.0.dist-info}/RECORD +102 -86
  51. clarifai_utils/client/app.py +115 -14
  52. clarifai_utils/client/base.py +11 -4
  53. clarifai_utils/client/dataset.py +8 -3
  54. clarifai_utils/client/input.py +34 -28
  55. clarifai_utils/client/model.py +71 -2
  56. clarifai_utils/client/module.py +4 -2
  57. clarifai_utils/client/runner.py +161 -0
  58. clarifai_utils/client/search.py +173 -0
  59. clarifai_utils/client/user.py +110 -4
  60. clarifai_utils/client/workflow.py +27 -2
  61. clarifai_utils/constants/search.py +2 -0
  62. clarifai_utils/datasets/upload/loaders/xview_detection.py +1 -1
  63. clarifai_utils/models/model_serving/README.md +3 -3
  64. clarifai_utils/models/model_serving/cli/deploy_cli.py +2 -3
  65. clarifai_utils/models/model_serving/cli/repository.py +3 -5
  66. clarifai_utils/models/model_serving/constants.py +1 -5
  67. clarifai_utils/models/model_serving/docs/custom_config.md +5 -6
  68. clarifai_utils/models/model_serving/docs/dependencies.md +5 -10
  69. clarifai_utils/models/model_serving/examples/image_classification/age_vit/requirements.txt +1 -0
  70. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +1 -0
  71. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +1 -0
  72. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +1 -0
  73. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/requirements.txt +1 -1
  74. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +1 -0
  75. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +1 -0
  76. clarifai_utils/models/model_serving/model_config/__init__.py +2 -0
  77. clarifai_utils/models/model_serving/model_config/config.py +298 -0
  78. clarifai_utils/models/model_serving/model_config/model_types_config/text-classifier.yaml +18 -0
  79. clarifai_utils/models/model_serving/model_config/model_types_config/text-embedder.yaml +18 -0
  80. clarifai_utils/models/model_serving/model_config/model_types_config/text-to-image.yaml +18 -0
  81. clarifai_utils/models/model_serving/model_config/model_types_config/text-to-text.yaml +18 -0
  82. clarifai_utils/models/model_serving/model_config/model_types_config/visual-classifier.yaml +18 -0
  83. clarifai_utils/models/model_serving/model_config/model_types_config/visual-detector.yaml +28 -0
  84. clarifai_utils/models/model_serving/model_config/model_types_config/visual-embedder.yaml +18 -0
  85. clarifai_utils/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +18 -0
  86. clarifai_utils/models/model_serving/model_config/serializer.py +1 -1
  87. clarifai_utils/models/model_serving/models/default_test.py +22 -21
  88. clarifai_utils/models/model_serving/models/output.py +2 -2
  89. clarifai_utils/models/model_serving/pb_model_repository.py +2 -5
  90. clarifai_utils/runners/__init__.py +0 -0
  91. clarifai_utils/runners/example.py +33 -0
  92. clarifai_utils/schema/search.py +60 -0
  93. clarifai_utils/utils/logging.py +53 -3
  94. clarifai_utils/versions.py +1 -1
  95. clarifai_utils/workflows/__init__.py +0 -0
  96. clarifai_utils/workflows/export.py +68 -0
  97. clarifai_utils/workflows/utils.py +59 -0
  98. clarifai_utils/workflows/validate.py +67 -0
  99. clarifai/models/model_serving/envs/triton_conda-cp3.8-torch1.13.1-19f97078.yaml +0 -35
  100. clarifai/models/model_serving/envs/triton_conda-cp3.8-torch2.0.0-ce980f28.yaml +0 -51
  101. clarifai/models/model_serving/examples/image_classification/age_vit/triton_conda.yaml +0 -1
  102. clarifai/models/model_serving/examples/text_classification/xlm-roberta/triton_conda.yaml +0 -1
  103. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/triton_conda.yaml +0 -1
  104. clarifai/models/model_serving/examples/text_to_text/bart-summarize/triton_conda.yaml +0 -1
  105. clarifai/models/model_serving/examples/visual_detection/yolov5x/triton_conda.yaml +0 -1
  106. clarifai/models/model_serving/examples/visual_embedding/vit-base/triton_conda.yaml +0 -1
  107. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/triton_conda.yaml +0 -1
  108. clarifai/models/model_serving/model_config/deploy.py +0 -75
  109. clarifai/models/model_serving/model_config/triton_config.py +0 -226
  110. clarifai_utils/models/model_serving/envs/triton_conda-cp3.8-torch1.13.1-19f97078.yaml +0 -35
  111. clarifai_utils/models/model_serving/envs/triton_conda-cp3.8-torch2.0.0-ce980f28.yaml +0 -51
  112. clarifai_utils/models/model_serving/examples/image_classification/age_vit/triton_conda.yaml +0 -1
  113. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/triton_conda.yaml +0 -1
  114. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/triton_conda.yaml +0 -1
  115. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/triton_conda.yaml +0 -1
  116. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/triton_conda.yaml +0 -1
  117. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/triton_conda.yaml +0 -1
  118. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/triton_conda.yaml +0 -1
  119. clarifai_utils/models/model_serving/model_config/deploy.py +0 -75
  120. clarifai_utils/models/model_serving/model_config/triton_config.py +0 -226
  121. {clarifai-9.8.1.dist-info → clarifai-9.9.0.dist-info}/LICENSE +0 -0
  122. {clarifai-9.8.1.dist-info → clarifai-9.9.0.dist-info}/WHEEL +0 -0
  123. {clarifai-9.8.1.dist-info → clarifai-9.9.0.dist-info}/entry_points.txt +0 -0
  124. {clarifai-9.8.1.dist-info → clarifai-9.9.0.dist-info}/top_level.txt +0 -0
@@ -1,226 +0,0 @@
- # Copyright 2023 Clarifai, Inc.
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #      http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """Triton Model Config classes."""
-
- from dataclasses import dataclass, field
- from typing import List
-
-
- @dataclass
- class DType:
-   """
-   Triton Model Config data types.
-   """
-   # https://github.com/triton-inference-server/common/blob/main/protobuf/model_config.proto
-   TYPE_UINT8: int = 2
-   TYPE_INT8: int = 6
-   TYPE_INT16: int = 7
-   TYPE_INT32: int = 8
-   TYPE_INT64: int = 9
-   TYPE_FP16: int = 10
-   TYPE_FP32: int = 11
-   TYPE_STRING: int = 13
-   KIND_GPU: int = 1
-   KIND_CPU: int = 2
-
-
- @dataclass
- class InputConfig:
-   """
-   Triton Input definition.
-   Params:
-   -------
-   name: input name
-   data_type: input data type
-   dims: Pre-defined input data shape(s).
-
-   Returns:
-   --------
-   InputConfig
-   """
-   name: str
-   data_type: int
-   dims: List = field(default_factory=list)
-
-
- @dataclass
- class OutputConfig:
-   """
-   Triton Output definition.
-   Params:
-   -------
-   name: output name
-   data_type: output data type
-   dims: Pre-defined output data shape(s).
-   labels (bool): If labels file is required for inference.
-
-   Returns:
-   --------
-   OutputConfig
-   """
-   name: str
-   data_type: int
-   dims: List = field(default_factory=list)
-   labels: bool = False
-
-   def __post_init__(self):
-     if self.labels:
-       self.label_filename = "labels.txt"
-     else:
-       del self.labels
-
-
- @dataclass
- class Device:
-   """
-   Triton instance_group.
-   Define the type of inference device and number of devices to use.
-   Params:
-   -------
-   count: number of devices
-   use_gpu: whether to use cpu or gpu.
-
-   Returns:
-   --------
-   Device object
-   """
-   count: int = 1
-   use_gpu: bool = True
-
-   def __post_init__(self):
-     if self.use_gpu:
-       self.kind: str = DType.KIND_GPU
-     else:
-       self.kind: str = DType.KIND_CPU
-
-
- @dataclass
- class DynamicBatching:
-   """
-   Triton dynamic_batching config.
-   Params:
-   -------
-   preferred_batch_size: batch size
-   max_queue_delay_microseconds: max queue delay for a request batch
-
-   Returns:
-   --------
-   DynamicBatching object
-   """
-   #preferred_batch_size: List[int] = [1] # recommended not to set
-   max_queue_delay_microseconds: int = 500
-
-
- @dataclass
- class TritonModelConfig:
-   """
-   Triton Model Config base.
-   Params:
-   -------
-   name: triton inference model name
-   input: a list of an InputConfig field
-   output: a list of OutputConfig fields/dicts
-   instance_group: Device. see Device
-   dynamic_batching: Triton dynamic batching settings.
-   max_batch_size: max request batch size
-   backend: Triton Python Backend. Constant
-
-   Returns:
-   --------
-   TritonModelConfig
-   """
-   model_name: str
-   model_version: str
-   model_type: str
-   image_shape: List  #(H, W)
-   input: List[InputConfig] = field(default_factory=list)
-   output: List[OutputConfig] = field(default_factory=list)
-   instance_group: Device = field(default_factory=Device)
-   dynamic_batching: DynamicBatching = field(default_factory=DynamicBatching)
-   max_batch_size: int = 1
-   backend: str = "python"
-
-   def __post_init__(self):
-     """
-     Set supported input dims and data_types for
-     a given model_type.
-     """
-     MAX_HW_DIM = 1024
-     if len(self.image_shape) != 2:
-       raise ValueError(
-           f"image_shape takes 2 values, Height and Width. Got {len(self.image_shape)} instead.")
-     if self.image_shape[0] > MAX_HW_DIM or self.image_shape[1] > MAX_HW_DIM:
-       raise ValueError(
-           f"H and W each have a maximum value of 1024. Got H: {self.image_shape[0]}, W: {self.image_shape[1]}"
-       )
-     image_dims = self.image_shape
-     image_dims.append(3)  # add channel dim
-     image_input = InputConfig(name="image", data_type=DType.TYPE_UINT8, dims=image_dims)
-     text_input = InputConfig(name="text", data_type=DType.TYPE_STRING, dims=[1])
-     # del image_shape as it's a temporary config that's not used by triton
-     del self.image_shape
-
-     if self.model_type == "visual-detector":
-       self.input.append(image_input)
-       pred_bboxes = OutputConfig(name="predicted_bboxes", data_type=DType.TYPE_FP32, dims=[-1, 4])
-       pred_labels = OutputConfig(
-           name="predicted_labels", data_type=DType.TYPE_INT32, dims=[-1, 1], labels=True)
-       del pred_labels.labels
-       pred_scores = OutputConfig(name="predicted_scores", data_type=DType.TYPE_FP32, dims=[-1, 1])
-       self.output.extend([pred_bboxes, pred_labels, pred_scores])
-
-     elif self.model_type == "visual-classifier":
-       self.input.append(image_input)
-       pred_labels = OutputConfig(
-           name="softmax_predictions", data_type=DType.TYPE_FP32, dims=[-1], labels=True)
-       del pred_labels.labels
-       self.output.append(pred_labels)
-
-     elif self.model_type == "text-classifier":
-       self.input.append(text_input)
-       pred_labels = OutputConfig(
-           name="softmax_predictions", data_type=DType.TYPE_FP32, dims=[-1], labels=True)
-       #'Len of out list expected to be the number of concepts returned by the model,
-       # with each value being the confidence for the respective model output.
-       del pred_labels.labels
-       self.output.append(pred_labels)
-
-     elif self.model_type == "text-to-text":
-       self.input.append(text_input)
-       pred_text = OutputConfig(name="text", data_type=DType.TYPE_STRING, dims=[1], labels=False)
-       self.output.append(pred_text)
-
-     elif self.model_type == "text-embedder":
-       self.input.append(text_input)
-       embedding_vector = OutputConfig(
-           name="embeddings", data_type=DType.TYPE_FP32, dims=[-1], labels=False)
-       self.output.append(embedding_vector)
-
-     elif self.model_type == "text-to-image":
-       self.input.append(text_input)
-       gen_image = OutputConfig(
-           name="image", data_type=DType.TYPE_UINT8, dims=[-1, -1, 3], labels=False)
-       self.output.append(gen_image)
-
-     elif self.model_type == "visual-embedder":
-       self.input.append(image_input)
-       embedding_vector = OutputConfig(
-           name="embeddings", data_type=DType.TYPE_FP32, dims=[-1], labels=False)
-       self.output.append(embedding_vector)
-
-     elif self.model_type == "visual-segmenter":
-       self.input.append(image_input)
-       pred_masks = OutputConfig(
-           name="predicted_mask", data_type=DType.TYPE_INT64, dims=[-1, -1], labels=True)
-       del pred_masks.labels
-       self.output.append(pred_masks)
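The 226 deleted lines above are the 9.8.1 triton_config.py, which derived a full Triton model config from a model_type string alone; its role appears to be absorbed by the new model_config/config.py (+298 lines, file 27 in the list above). A minimal usage sketch against clarifai==9.8.1, where this module still ships; the model name, version, and shape are hypothetical example values, and the import path is assumed to match the deleted file's location:

    from clarifai.models.model_serving.model_config.triton_config import TritonModelConfig

    cfg = TritonModelConfig(
        model_name="my_detector",  # hypothetical
        model_version="1",
        model_type="visual-detector",
        image_shape=[640, 640],  # (H, W), each capped at 1024; a channel dim is appended
    )

    # __post_init__ populated the Triton tensors from model_type alone:
    print([i.name for i in cfg.input])   # ['image'] (TYPE_UINT8, dims [640, 640, 3])
    print([o.name for o in cfg.output])  # ['predicted_bboxes', 'predicted_labels', 'predicted_scores']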
@@ -1,35 +0,0 @@
- name: triton_conda-cp3.8-torch1.13.1-19f97078
- channels:
-   - conda-forge
- dependencies:
-   - _libgcc_mutex=0.1=conda_forge
-   - _openmp_mutex=4.5=2_gnu
-   - bzip2=1.0.8=h7f98852_4
-   - ca-certificates=2022.12.7=ha878542_0
-   - ld_impl_linux-64=2.40=h41732ed_0
-   - libffi=3.4.2=h7f98852_5
-   - libgcc-ng=12.2.0=h65d4601_19
-   - libgomp=12.2.0=h65d4601_19
-   - libnsl=2.0.0=h7f98852_0
-   - libsqlite=3.40.0=h753d276_0
-   - libuuid=2.32.1=h7f98852_1000
-   - libzlib=1.2.13=h166bdaf_4
-   - ncurses=6.3=h27087fc_1
-   - openssl=3.0.8=h0b41bf4_0
-   - pip=23.0.1=pyhd8ed1ab_0
-   - python=3.8.16=he550d4f_1_cpython
-   - readline=8.1.2=h0f457ee_0
-   - setuptools=67.4.0=pyhd8ed1ab_0
-   - tk=8.6.12=h27826a3_0
-   - wheel=0.38.4=pyhd8ed1ab_0
-   - xz=5.2.6=h166bdaf_0
-   - pip:
-     - numpy==1.24.2
-     - nvidia-cublas-cu11==11.10.3.66
-     - nvidia-cuda-nvrtc-cu11==11.7.99
-     - nvidia-cuda-runtime-cu11==11.7.99
-     - nvidia-cudnn-cu11==8.5.0.96
-     - opencv-python==4.7.0.72
-     - pillow==9.4.0
-     - torch==1.13.1
-     - typing-extensions==4.5.0
@@ -1,51 +0,0 @@
- name: triton_conda-cp3.8-torch2.0.0-ce980f28
- channels:
-   - conda-forge
- dependencies:
-   - _libgcc_mutex=0.1=conda_forge
-   - _openmp_mutex=4.5=2_gnu
-   - bzip2=1.0.8=h7f98852_4
-   - ca-certificates=2023.5.7=hbcca054_0
-   - ld_impl_linux-64=2.40=h41732ed_0
-   - libffi=3.4.2=h7f98852_5
-   - libgcc-ng=13.1.0=he5830b7_0
-   - libgomp=13.1.0=he5830b7_0
-   - libnsl=2.0.0=h7f98852_0
-   - libsqlite=3.42.0=h2797004_0
-   - libuuid=2.38.1=h0b41bf4_0
-   - libzlib=1.2.13=hd590300_5
-   - ncurses=6.4=hcb278e6_0
-   - openssl=3.1.1=hd590300_1
-   - pip=23.1.2=pyhd8ed1ab_0
-   - python=3.8.17=he550d4f_0_cpython
-   - readline=8.2=h8228510_1
-   - setuptools=68.0.0=pyhd8ed1ab_0
-   - tk=8.6.12=h27826a3_0
-   - wheel=0.40.0=pyhd8ed1ab_0
-   - xz=5.2.6=h166bdaf_0
-   - pip:
-     - cmake==3.26.4
-     - filelock==3.12.2
-     - jinja2==3.1.2
-     - lit==16.0.6
-     - markupsafe==2.1.3
-     - mpmath==1.3.0
-     - networkx==3.1
-     - numpy==1.24.2
-     - nvidia-cublas-cu11==11.10.3.66
-     - nvidia-cuda-cupti-cu11==11.7.101
-     - nvidia-cuda-nvrtc-cu11==11.7.99
-     - nvidia-cuda-runtime-cu11==11.7.99
-     - nvidia-cudnn-cu11==8.5.0.96
-     - nvidia-cufft-cu11==10.9.0.58
-     - nvidia-curand-cu11==10.2.10.91
-     - nvidia-cusolver-cu11==11.4.0.1
-     - nvidia-cusparse-cu11==11.7.4.91
-     - nvidia-nccl-cu11==2.14.3
-     - nvidia-nvtx-cu11==11.7.91
-     - opencv-python==4.7.0.72
-     - pillow==9.4.0
-     - sympy==1.12
-     - torch==2.0.0
-     - triton==2.0.0
-     - typing-extensions==4.5.0
@@ -1 +0,0 @@
- name: triton_conda-cp3.8-torch1.13.1-19f97078
@@ -1 +0,0 @@
- name: triton_conda-cp3.8-torch1.13.1-19f97078
@@ -1 +0,0 @@
- name: triton_conda-cp3.8-torch1.13.1-19f97078
@@ -1 +0,0 @@
- name: triton_conda-cp3.8-torch1.13.1-19f97078
@@ -1 +0,0 @@
- name: triton_conda-cp3.8-torch1.13.1-19f97078
@@ -1 +0,0 @@
- name: triton_conda-cp3.8-torch1.13.1-19f97078
@@ -1 +0,0 @@
- name: triton_conda-cp3.8-torch1.13.1-19f97078
@@ -1,75 +0,0 @@
- # Copyright 2023 Clarifai, Inc.
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #      http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """Triton Model Config classes."""
-
- from dataclasses import dataclass, field
- from typing import List
-
-
- @dataclass
- class ClarifaiFieldsMap:
-   """
-   Triton Model Config base.
-   Params:
-   -------
-   model_type
-
-   Returns:
-   --------
-   ClarifaiFieldsMap
-   """
-   model_type: str
-   input_fields_map: List = field(default_factory=list)
-   output_fields_map: List = field(default_factory=list)
-
-   def __post_init__(self):
-     """
-     Set mapping of clarifai in/output vs triton in/output
-     """
-     text_input_fields = {"text": "text"}
-     image_input_fields = {"image": "image"}
-
-     embedding_output_fields = {"embeddings": "embeddings"}
-
-     if self.model_type == "visual-detector":
-       self.input_fields_map = image_input_fields
-       self.output_fields_map = {
-           "regions[...].region_info.bounding_box": "predicted_bboxes",
-           "regions[...].data.concepts[...].id": "predicted_labels",
-           "regions[...].data.concepts[...].value": "predicted_scores"
-       }
-     elif self.model_type == "visual-classifier":
-       self.input_fields_map = image_input_fields
-       self.output_fields_map = {"concepts": "softmax_predictions"}
-     elif self.model_type == "text-classifier":
-       self.input_fields_map = text_input_fields
-       self.output_fields_map = {"concepts": "softmax_predictions"}
-     elif self.model_type == "text-embedder":
-       self.input_fields_map = text_input_fields
-       self.output_fields_map = embedding_output_fields
-     elif self.model_type == "text-to-text":
-       self.input_fields_map = text_input_fields
-       # input and output fields are the same for text-to-text
-       self.output_fields_map = text_input_fields
-     elif self.model_type == "text-to-image":
-       self.input_fields_map = text_input_fields
-       # image output fields match image_input fields
-       self.output_fields_map = image_input_fields
-     elif self.model_type == "visual-embedder":
-       self.input_fields_map = image_input_fields
-       self.output_fields_map = embedding_output_fields
-     elif self.model_type == "visual-segmenter":
-       self.input_fields_map = image_input_fields
-       self.output_fields_map = {
-           "regions[...].region_info.mask,regions[...].data.concepts": "predicted_mask"
-       }
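This 75-line deletion is the 9.8.1 deploy.py, whose ClarifaiFieldsMap paired Clarifai API proto field paths with the Triton tensor names defined in triton_config.py. A companion sketch, again against clarifai==9.8.1 and assuming the module path matches the deleted file's location:

    from clarifai.models.model_serving.model_config.deploy import ClarifaiFieldsMap

    fields = ClarifaiFieldsMap(model_type="visual-detector")
    # __post_init__ swapped the empty-list defaults for dicts:
    print(fields.input_fields_map)  # {'image': 'image'}
    print(fields.output_fields_map["regions[...].region_info.bounding_box"])  # 'predicted_bboxes'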