clarifai-9.11.1-py3-none-any.whl → clarifai-10.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99)
  1. clarifai/client/input.py +34 -1
  2. clarifai/client/workflow.py +6 -2
  3. clarifai/constants/rag.py +1 -0
  4. clarifai/models/model_serving/README.md +1 -1
  5. clarifai/models/model_serving/models/default_test.py +3 -0
  6. clarifai/rag/__init__.py +3 -0
  7. clarifai/rag/rag.py +261 -0
  8. clarifai/rag/utils.py +102 -0
  9. clarifai/versions.py +1 -1
  10. {clarifai-9.11.1.dist-info → clarifai-10.0.0.dist-info}/METADATA +16 -3
  11. clarifai-10.0.0.dist-info/RECORD +103 -0
  12. clarifai/models/model_serving/examples/README.md +0 -7
  13. clarifai/models/model_serving/examples/image_classification/README.md +0 -12
  14. clarifai/models/model_serving/examples/image_classification/age_vit/1/__init__.py +0 -0
  15. clarifai/models/model_serving/examples/image_classification/age_vit/1/inference.py +0 -64
  16. clarifai/models/model_serving/examples/image_classification/age_vit/1/model.py +0 -74
  17. clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/README.md +0 -11
  18. clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/config.json +0 -42
  19. clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/preprocessor_config.json +0 -15
  20. clarifai/models/model_serving/examples/image_classification/age_vit/config.pbtxt +0 -23
  21. clarifai/models/model_serving/examples/image_classification/age_vit/labels.txt +0 -9
  22. clarifai/models/model_serving/examples/image_classification/age_vit/requirements.txt +0 -7
  23. clarifai/models/model_serving/examples/multimodal_embedder/README.md +0 -12
  24. clarifai/models/model_serving/examples/multimodal_embedder/clip/1/__init__.py +0 -0
  25. clarifai/models/model_serving/examples/multimodal_embedder/clip/1/inference.py +0 -66
  26. clarifai/models/model_serving/examples/multimodal_embedder/clip/1/model.py +0 -74
  27. clarifai/models/model_serving/examples/multimodal_embedder/clip/1/test.py +0 -64
  28. clarifai/models/model_serving/examples/multimodal_embedder/clip/config.pbtxt +0 -29
  29. clarifai/models/model_serving/examples/multimodal_embedder/clip/requirements.txt +0 -4
  30. clarifai/models/model_serving/examples/text_classification/README.md +0 -12
  31. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/__init__.py +0 -0
  32. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/inference.py +0 -62
  33. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/model.py +0 -74
  34. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/README.md +0 -12
  35. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/config.json +0 -34
  36. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/special_tokens_map.json +0 -1
  37. clarifai/models/model_serving/examples/text_classification/xlm-roberta/config.pbtxt +0 -21
  38. clarifai/models/model_serving/examples/text_classification/xlm-roberta/labels.txt +0 -3
  39. clarifai/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +0 -7
  40. clarifai/models/model_serving/examples/text_embedding/README.md +0 -12
  41. clarifai/models/model_serving/examples/text_embedding/instructor-xl/1/__init__.py +0 -0
  42. clarifai/models/model_serving/examples/text_embedding/instructor-xl/1/inference.py +0 -63
  43. clarifai/models/model_serving/examples/text_embedding/instructor-xl/1/model.py +0 -74
  44. clarifai/models/model_serving/examples/text_embedding/instructor-xl/1/test.py +0 -64
  45. clarifai/models/model_serving/examples/text_embedding/instructor-xl/config.pbtxt +0 -20
  46. clarifai/models/model_serving/examples/text_embedding/instructor-xl/requirements.txt +0 -9
  47. clarifai/models/model_serving/examples/text_to_image/README.md +0 -10
  48. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/__init__.py +0 -0
  49. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/inference.py +0 -58
  50. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/model.py +0 -74
  51. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/config.pbtxt +0 -22
  52. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +0 -6
  53. clarifai/models/model_serving/examples/text_to_text/README.md +0 -12
  54. clarifai/models/model_serving/examples/text_to_text/bart-summarize/1/__init__.py +0 -0
  55. clarifai/models/model_serving/examples/text_to_text/bart-summarize/1/inference.py +0 -59
  56. clarifai/models/model_serving/examples/text_to_text/bart-summarize/1/model.py +0 -74
  57. clarifai/models/model_serving/examples/text_to_text/bart-summarize/config.pbtxt +0 -20
  58. clarifai/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +0 -4
  59. clarifai/models/model_serving/examples/visual_detection/Readme.md +0 -61
  60. clarifai/models/model_serving/examples/visual_detection/faster-rcnn_torchserve/1/inference.py +0 -96
  61. clarifai/models/model_serving/examples/visual_detection/faster-rcnn_torchserve/1/model.py +0 -74
  62. clarifai/models/model_serving/examples/visual_detection/faster-rcnn_torchserve/1/model_store/hub/checkpoints/keep +0 -0
  63. clarifai/models/model_serving/examples/visual_detection/faster-rcnn_torchserve/1/test.py +0 -62
  64. clarifai/models/model_serving/examples/visual_detection/faster-rcnn_torchserve/config.pbtxt +0 -35
  65. clarifai/models/model_serving/examples/visual_detection/faster-rcnn_torchserve/labels.txt +0 -80
  66. clarifai/models/model_serving/examples/visual_detection/faster-rcnn_torchserve/requirements.txt +0 -3
  67. clarifai/models/model_serving/examples/visual_detection/yolof/1/config/yolof_r50_c5_8x8_1x_coco.py +0 -245
  68. clarifai/models/model_serving/examples/visual_detection/yolof/1/inference.py +0 -90
  69. clarifai/models/model_serving/examples/visual_detection/yolof/1/model.py +0 -74
  70. clarifai/models/model_serving/examples/visual_detection/yolof/1/test.py +0 -64
  71. clarifai/models/model_serving/examples/visual_detection/yolof/config.pbtxt +0 -36
  72. clarifai/models/model_serving/examples/visual_detection/yolof/labels.txt +0 -80
  73. clarifai/models/model_serving/examples/visual_detection/yolof/requirements.txt +0 -8
  74. clarifai/models/model_serving/examples/visual_embedding/README.md +0 -12
  75. clarifai/models/model_serving/examples/visual_embedding/vit-base/1/__init__.py +0 -0
  76. clarifai/models/model_serving/examples/visual_embedding/vit-base/1/inference.py +0 -56
  77. clarifai/models/model_serving/examples/visual_embedding/vit-base/1/model.py +0 -74
  78. clarifai/models/model_serving/examples/visual_embedding/vit-base/config.pbtxt +0 -22
  79. clarifai/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +0 -5
  80. clarifai/models/model_serving/examples/visual_segmentation/README.md +0 -12
  81. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/1/__init__.py +0 -0
  82. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/1/inference.py +0 -62
  83. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/1/model.py +0 -74
  84. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/config.pbtxt +0 -24
  85. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/labels.txt +0 -18
  86. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +0 -5
  87. clarifai/models/model_serving/examples/vllm/Readme.md +0 -12
  88. clarifai/models/model_serving/examples/vllm/example/1/__init__.py +0 -0
  89. clarifai/models/model_serving/examples/vllm/example/1/inference.py +0 -56
  90. clarifai/models/model_serving/examples/vllm/example/1/model.py +0 -74
  91. clarifai/models/model_serving/examples/vllm/example/1/test.py +0 -64
  92. clarifai/models/model_serving/examples/vllm/example/1/weights/keep +0 -0
  93. clarifai/models/model_serving/examples/vllm/example/config.pbtxt +0 -20
  94. clarifai/models/model_serving/examples/vllm/example/requirements.txt +0 -5
  95. clarifai-9.11.1.dist-info/RECORD +0 -182
  96. {clarifai-9.11.1.dist-info → clarifai-10.0.0.dist-info}/LICENSE +0 -0
  97. {clarifai-9.11.1.dist-info → clarifai-10.0.0.dist-info}/WHEEL +0 -0
  98. {clarifai-9.11.1.dist-info → clarifai-10.0.0.dist-info}/entry_points.txt +0 -0
  99. {clarifai-9.11.1.dist-info → clarifai-10.0.0.dist-info}/top_level.txt +0 -0
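Most of the line count in this release is the removal of the bundled `examples/` directory under `clarifai/models/model_serving` (items 12-94 above); the hunks below show the deleted file contents. The headline addition is the new retrieval-augmented generation interface (`clarifai/rag/rag.py`, `clarifai/rag/utils.py`, `clarifai/constants/rag.py`). A minimal sketch of how the new module might be driven is shown below; the method names and arguments are assumptions inferred from the added files, so verify exact signatures against `clarifai/rag/rag.py` in the 10.0.0 wheel.

```python
# Hedged sketch of the new clarifai.rag interface added in 10.0.0.
# setup/upload/chat and their arguments (user_id, file_path, messages) are
# assumptions based on the added rag.py/utils.py modules, not confirmed API.
# Assumes a CLARIFAI_PAT is available in the environment.
from clarifai.rag import RAG

rag_agent = RAG.setup(user_id="YOUR_USER_ID")    # bootstrap a RAG workflow/app
rag_agent.upload(file_path="docs/handbook.pdf")  # hypothetical local document to index
answer = rag_agent.chat(
    messages=[{"role": "human", "content": "Summarize the handbook."}])
print(answer)
```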
@@ -1,245 +0,0 @@
- auto_scale_lr = dict(base_batch_size=64, enable=False)
- data_root = 'data/coco/'
- dataset_type = 'CocoDataset'
- default_hooks = dict(
- checkpoint=dict(interval=1, type='CheckpointHook'),
- logger=dict(interval=50, type='LoggerHook'),
- param_scheduler=dict(type='ParamSchedulerHook'),
- sampler_seed=dict(type='DistSamplerSeedHook'),
- timer=dict(type='IterTimerHook'),
- visualization=dict(type='DetVisualizationHook'))
- default_scope = 'mmdet'
- env_cfg = dict(
- cudnn_benchmark=False,
- dist_cfg=dict(backend='nccl'),
- mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0))
- file_client_args = dict(backend='disk')
- load_from = None
- log_level = 'INFO'
- log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50)
- model = dict(
- backbone=dict(
- depth=50,
- frozen_stages=1,
- init_cfg=dict(checkpoint='open-mmlab://detectron/resnet50_caffe', type='Pretrained'),
- norm_cfg=dict(requires_grad=False, type='BN'),
- norm_eval=True,
- num_stages=4,
- out_indices=(3,),
- style='caffe',
- type='ResNet'),
- bbox_head=dict(
- anchor_generator=dict(
- ratios=[
- 1.0,
- ], scales=[
- 1,
- 2,
- 4,
- 8,
- 16,
- ], strides=[
- 32,
- ], type='AnchorGenerator'),
- bbox_coder=dict(
- add_ctr_clamp=True,
- ctr_clamp=32,
- target_means=[
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- ],
- target_stds=[
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- ],
- type='DeltaXYWHBBoxCoder'),
- in_channels=512,
- loss_bbox=dict(loss_weight=1.0, type='GIoULoss'),
- loss_cls=dict(alpha=0.25, gamma=2.0, loss_weight=1.0, type='FocalLoss', use_sigmoid=True),
- num_classes=80,
- reg_decoded_bbox=True,
- type='YOLOFHead'),
- data_preprocessor=dict(
- bgr_to_rgb=False,
- mean=[
- 103.53,
- 116.28,
- 123.675,
- ],
- pad_size_divisor=32,
- std=[
- 1.0,
- 1.0,
- 1.0,
- ],
- type='DetDataPreprocessor'),
- neck=dict(
- block_dilations=[
- 2,
- 4,
- 6,
- 8,
- ],
- block_mid_channels=128,
- in_channels=2048,
- num_residual_blocks=4,
- out_channels=512,
- type='DilatedEncoder'),
- test_cfg=dict(
- max_per_img=100,
- min_bbox_size=0,
- nms=dict(iou_threshold=0.6, type='nms'),
- nms_pre=1000,
- score_thr=0.05),
- train_cfg=dict(
- allowed_border=-1,
- assigner=dict(neg_ignore_thr=0.7, pos_ignore_thr=0.15, type='UniformAssigner'),
- debug=False,
- pos_weight=-1),
- type='YOLOF')
- optim_wrapper = dict(
- optimizer=dict(lr=0.12, momentum=0.9, type='SGD', weight_decay=0.0001),
- paramwise_cfg=dict(
- custom_keys=dict(backbone=dict(lr_mult=0.3333333333333333)), norm_decay_mult=0.0),
- type='OptimWrapper')
- param_scheduler = [
- dict(begin=0, by_epoch=False, end=1500, start_factor=0.00066667, type='LinearLR'),
- dict(begin=0, by_epoch=True, end=12, gamma=0.1, milestones=[
- 8,
- 11,
- ], type='MultiStepLR'),
- ]
- resume = False
- test_cfg = dict(type='TestLoop')
- test_dataloader = dict(
- batch_size=1,
- dataset=dict(
- ann_file='annotations/instances_val2017.json',
- data_prefix=dict(img='val2017/'),
- data_root='data/coco/',
- pipeline=[
- dict(file_client_args=dict(backend='disk'), type='LoadImageFromFile'),
- dict(keep_ratio=True, scale=(
- 1333,
- 800,
- ), type='Resize'),
- dict(type='LoadAnnotations', with_bbox=True),
- dict(
- meta_keys=(
- 'img_id',
- 'img_path',
- 'ori_shape',
- 'img_shape',
- 'scale_factor',
- ),
- type='PackDetInputs'),
- ],
- test_mode=True,
- type='CocoDataset'),
- drop_last=False,
- num_workers=2,
- persistent_workers=True,
- sampler=dict(shuffle=False, type='DefaultSampler'))
- test_evaluator = dict(
- ann_file='data/coco/annotations/instances_val2017.json',
- format_only=False,
- metric='bbox',
- type='CocoMetric')
- test_pipeline = [
- dict(file_client_args=dict(backend='disk'), type='LoadImageFromFile'),
- dict(keep_ratio=True, scale=(
- 1333,
- 800,
- ), type='Resize'),
- dict(type='LoadAnnotations', with_bbox=True),
- dict(
- meta_keys=(
- 'img_id',
- 'img_path',
- 'ori_shape',
- 'img_shape',
- 'scale_factor',
- ),
- type='PackDetInputs'),
- ]
- train_cfg = dict(max_epochs=12, type='EpochBasedTrainLoop', val_interval=1)
- train_dataloader = dict(
- batch_sampler=dict(type='AspectRatioBatchSampler'),
- batch_size=8,
- dataset=dict(
- ann_file='annotations/instances_train2017.json',
- data_prefix=dict(img='train2017/'),
- data_root='data/coco/',
- filter_cfg=dict(filter_empty_gt=True, min_size=32),
- pipeline=[
- dict(file_client_args=dict(backend='disk'), type='LoadImageFromFile'),
- dict(type='LoadAnnotations', with_bbox=True),
- dict(keep_ratio=True, scale=(
- 1333,
- 800,
- ), type='Resize'),
- dict(prob=0.5, type='RandomFlip'),
- dict(max_shift_px=32, prob=0.5, type='RandomShift'),
- dict(type='PackDetInputs'),
- ],
- type='CocoDataset'),
- num_workers=8,
- persistent_workers=True,
- sampler=dict(shuffle=True, type='DefaultSampler'))
- train_pipeline = [
- dict(file_client_args=dict(backend='disk'), type='LoadImageFromFile'),
- dict(type='LoadAnnotations', with_bbox=True),
- dict(keep_ratio=True, scale=(
- 1333,
- 800,
- ), type='Resize'),
- dict(prob=0.5, type='RandomFlip'),
- dict(max_shift_px=32, prob=0.5, type='RandomShift'),
- dict(type='PackDetInputs'),
- ]
- val_cfg = dict(type='ValLoop')
- val_dataloader = dict(
- batch_size=1,
- dataset=dict(
- ann_file='annotations/instances_val2017.json',
- data_prefix=dict(img='val2017/'),
- data_root='data/coco/',
- pipeline=[
- dict(file_client_args=dict(backend='disk'), type='LoadImageFromFile'),
- dict(keep_ratio=True, scale=(
- 1333,
- 800,
- ), type='Resize'),
- dict(type='LoadAnnotations', with_bbox=True),
- dict(
- meta_keys=(
- 'img_id',
- 'img_path',
- 'ori_shape',
- 'img_shape',
- 'scale_factor',
- ),
- type='PackDetInputs'),
- ],
- test_mode=True,
- type='CocoDataset'),
- drop_last=False,
- num_workers=2,
- persistent_workers=True,
- sampler=dict(shuffle=False, type='DefaultSampler'))
- val_evaluator = dict(
- ann_file='data/coco/annotations/instances_val2017.json',
- format_only=False,
- metric='bbox',
- type='CocoMetric')
- vis_backends = [
- dict(type='LocalVisBackend'),
- ]
- visualizer = dict(
- name='visualizer', type='DetLocalVisualizer', vis_backends=[
- dict(type='LocalVisBackend'),
- ])
@@ -1,90 +0,0 @@
- # This file contains boilerplate code to allow users write their model
- # inference code that will then interact with the Triton Inference Server
- # Python backend to serve end user requests.
- # The module name, module path, class name & get_predictions() method names MUST be maintained as is
- # but other methods may be added within the class as deemed fit provided
- # they are invoked within the main get_predictions() inference method
- # if they play a role in any step of model inference
- """User model inference script."""
-
- import os
- from pathlib import Path
-
- import numpy as np
- import torch
- from mmdet.apis import inference_detector, init_detector
- from mmdet.utils import register_all_modules
-
- from clarifai.models.model_serving.model_config import ModelTypes, get_model_config
- from clarifai.models.model_serving.models.output import VisualDetectorOutput
-
- # Initialize the DetInferencer
- register_all_modules()
-
- config = get_model_config(ModelTypes.visual_detector)
-
-
- class InferenceModel:
- """User model inference class."""
-
- def __init__(self) -> None:
- """
- Load inference time artifacts that are called frequently .e.g. models, tokenizers, etc.
- in this method so they are loaded only once for faster inference.
- """
- self.base_path: Path = os.path.dirname(__file__)
- self.checkpoint = os.path.join(self.base_path,
- "config/yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth")
- self.config_path = os.path.join(self.base_path, "config/yolof_r50_c5_8x8_1x_coco.py")
- self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
- self.model = init_detector(self.config_path, self.checkpoint, device=self.device)
-
- @config.inference.wrap_func
- def get_predictions(self, input_data: list, **kwargs) -> list:
- """
- Main model inference method.
-
- Args:
- -----
- input_data: A list of input data item to predict on.
- Input data can be an image or text, etc depending on the model type.
-
- **kwargs: your inference parameters.
-
- Returns:
- --------
- List of one of the `clarifai.models.model_serving.models.output types` or `config.inference.return_type(your_output)`. Refer to the README/docs
- """
- max_bbox_count = 500 # max allowed detected bounding boxes per image
- outputs = []
-
- if isinstance(input_data, np.ndarray) and len(input_data.shape) == 4:
- input_data = list(input_data)
-
- predictions = inference_detector(self.model, input_data)
- for inp_data, preds in zip(input_data, predictions):
-
- labels = preds.pred_instances.labels.cpu().numpy()
- bboxes = preds.pred_instances.bboxes.cpu().numpy()
- scores = preds.pred_instances.scores.cpu().numpy()
- labels = [[each] for each in labels]
- scores = [[each] for each in scores]
- h, w, _ = inp_data.shape # input image shape
- bboxes = [[x[1] / h, x[0] / w, x[3] / h, x[2] / w]
- for x in bboxes] # normalize the bboxes to [0,1]
- bboxes = np.clip(bboxes, 0, 1.)
- if len(bboxes) != 0:
- bboxes = np.concatenate((bboxes, np.zeros((max_bbox_count - len(bboxes), 4))))
- scores = np.concatenate((scores, np.zeros((max_bbox_count - len(scores), 1))))
- labels = np.concatenate((labels, np.zeros(
- (max_bbox_count - len(labels), 1), dtype=np.int32)))
- else:
- bboxes = np.zeros((max_bbox_count, 4), dtype=np.float32)
- scores = np.zeros((max_bbox_count, 1), dtype=np.float32)
- labels = np.zeros((max_bbox_count, 1), dtype=np.int32)
-
- outputs.append(
- VisualDetectorOutput(
- predicted_bboxes=bboxes, predicted_labels=labels, predicted_scores=scores))
-
- return outputs
@@ -1,74 +0,0 @@
- # Copyright 2023 Clarifai, Inc.
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """Triton inference server Python Backend Model."""
-
- import os
- import sys
-
- try:
- import triton_python_backend_utils as pb_utils
- except ModuleNotFoundError:
- pass
- from google.protobuf import text_format
- from tritonclient.grpc.model_config_pb2 import ModelConfig
- from clarifai.models.model_serving.model_config.inference_parameter import parse_req_parameters
-
-
- class TritonPythonModel:
- """
- Triton Python BE Model.
- """
-
- def initialize(self, args):
- """
- Triton server init.
- """
- args["model_repository"] = args["model_repository"].replace("/1/model.py", "")
- sys.path.append(os.path.dirname(__file__))
- from inference import InferenceModel
-
- self.inference_obj = InferenceModel()
-
- # Read input_name from config file
- self.config_msg = ModelConfig()
- with open(os.path.join(args["model_repository"], "config.pbtxt"), "r") as f:
- cfg = f.read()
- text_format.Merge(cfg, self.config_msg)
- self.input_names = [inp.name for inp in self.config_msg.input]
-
- def execute(self, requests):
- """
- Serve model inference requests.
- """
- responses = []
-
- for request in requests:
- parameters = request.parameters()
- parameters = parse_req_parameters(parameters) if parameters else {}
-
- if len(self.input_names) == 1:
- in_batch = pb_utils.get_input_tensor_by_name(request, self.input_names[0])
- in_batch = in_batch.as_numpy()
- inference_response = self.inference_obj.get_predictions(in_batch, **parameters)
- else:
- multi_in_batch_dict = {}
- for input_name in self.input_names:
- in_batch = pb_utils.get_input_tensor_by_name(request, input_name)
- in_batch = in_batch.as_numpy() if in_batch is not None else []
- multi_in_batch_dict.update({input_name: in_batch})
-
- inference_response = self.inference_obj.get_predictions(multi_in_batch_dict, **parameters)
-
- responses.append(inference_response)
-
- return responses
@@ -1,64 +0,0 @@
- import logging
- import os
- import unittest
-
- from clarifai.models.model_serving.models.default_test import DefaultTestInferenceModel
-
-
- class CustomTestInferenceModel(DefaultTestInferenceModel):
- """
- Run this file to test your implementation of InferenceModel in inference.py with default tests of Triton configuration and its output values based on basic predefined inputs
- If you want to write custom testcase or just test output value.
- Please follow these instrucitons:
- 1. Name your test function with prefix "test" so that pytest can execute
- 2. In order to obtain output of InferenceModel, call `self.triton_get_predictions(input_data)`.
- 3. If your input is `image` and you have set custom size of it when building model repository,
- call `self.preprocess(image)` to obtain correct resized input
- 4. Run this test by calling
- ```bash
- pytest ./your_triton_folder/1/test.py
- #to see std output
- pytest --log-cli-level=INFO -s ./your_triton_folder/1/test.py
- ```
-
- ### Examples:
- + test text-to-image output
- ```
- def test_text_to_image_output(self):
- text = "Test text"
- output = self.triton_get_predictions(text)
- image = output.image # uint8 np.ndarray image
- #show or save
- ```
- + test visual-classifier output
- ```
- def test_visual_classifier(self):
- image = cv2.imread("your/local/image.jpg") # Keep in mind of format of image (BGR or RGB)
- output = self.triton_get_predictions(image)
- scores = output.predicted_scores # np.ndarray
- #process scores to get class id and its score
- logger.info(result)
- """
-
- # Insert your inference parameters json path here
- # or insert a dictionary of your_parameter_name and value, e.g dict(x=1.5, y="text", c=True)
- # or Leave it as "" if you don't have it.
- inference_parameters = ""
-
- ########### Initialization. Do not change it ###########
- __test__ = True
-
- def setUp(self) -> None:
- logging.info("Initializing...")
- model_type = "visual-detector" # your model type
- self.intitialize(
- model_type,
- repo_version_dir=os.path.dirname(__file__),
- is_instance_kind_gpu=True,
- inference_parameters=self.inference_parameters)
-
- ########################################################
-
-
- if __name__ == '__main__':
- unittest.main()
@@ -1,36 +0,0 @@
- name: "yolov5x"
- max_batch_size: 1
- input {
- name: "image"
- data_type: TYPE_UINT8
- dims: -1
- dims: -1
- dims: 3
- }
- output {
- name: "predicted_bboxes"
- data_type: TYPE_FP32
- dims: -1
- dims: 4
- }
- output {
- name: "predicted_labels"
- data_type: TYPE_INT32
- dims: -1
- dims: 1
- label_filename: "labels.txt"
- }
- output {
- name: "predicted_scores"
- data_type: TYPE_FP32
- dims: -1
- dims: 1
- }
- instance_group {
- count: 1
- kind: KIND_GPU
- }
- dynamic_batching {
- max_queue_delay_microseconds: 500
- }
- backend: "python"
@@ -1,80 +0,0 @@
- person
- bicycle
- car
- motorcycle
- airplane
- bus
- train
- truck
- boat
- traffic-light
- fire-hydrant
- stop-sign
- parking-meter
- bench
- bird
- cat
- dog
- horse
- sheep
- cow
- elephant
- bear
- zebra
- giraffe
- backpack
- umbrella
- handbag
- tie
- suitcase
- frisbee
- skis
- snowboard
- sports-ball
- kite
- baseball-bat
- baseball-glove
- skateboard
- surfboard
- tennis-racket
- bottle
- wine-glass
- cup
- fork
- knife
- spoon
- bowl
- banana
- apple
- sandwich
- orange
- broccoli
- carrot
- hot-dog
- pizza
- donut
- cake
- chair
- couch
- potted-plant
- bed
- dining-table
- toilet
- tv
- laptop
- mouse
- remote
- keyboard
- cell-phone
- microwave
- oven
- toaster
- sink
- refrigerator
- book
- clock
- vase
- scissors
- teddy-bear
- hair-drier
- toothbrush
@@ -1,8 +0,0 @@
- mmdet==3.0.0rc3
- mmcv==2.0.0rc3
- -f https://download.openmmlab.com/mmcv/dist/cu117/torch1.13/index.html
-
- tritonclient[all]
- clarifai>9.10.4
- torch==1.13.1
- numpy==1.23.1
@@ -1,12 +0,0 @@
- ## Visual Embedding Triton Model Examples
-
- These can be used on the fly with minimal or no changes to test deploy visual embedding models to the Clarifai platform. See the required files section for each model below.
-
- * ### [vit-base](./vit-base/)
-
- Requirements to run tests locally:
-
- * Download the [model checkpoint & sentencepiece bpe model from huggingface](https://huggingface.co/google/vit-base-patch16-224/tree/main) and store it under `vit-base/1/checkpoint`
- ```
- huggingface-cli download google/vit-base-patch16-224 --local-dir vit-base/1/checkpoint --local-dir-use-symlinks False --exclude *.msgpack *.h5 *.safetensors
- ```
@@ -1,56 +0,0 @@
- # This file contains boilerplate code to allow users write their model
- # inference code that will then interact with the Triton Inference Server
- # Python backend to serve end user requests.
- # The module name, module path, class name & get_predictions() method names MUST be maintained as is
- # but other methods may be added within the class as deemed fit provided
- # they are invoked within the main get_predictions() inference method
- # if they play a role in any step of model inference
- """User model inference script."""
-
- import os
- from pathlib import Path
-
- import torch
- from transformers import AutoModel, ViTImageProcessor
-
- from clarifai.models.model_serving.model_config import ModelTypes, get_model_config
- from clarifai.models.model_serving.models.output import EmbeddingOutput
-
- config = get_model_config(ModelTypes.visual_embedder)
-
-
- class InferenceModel:
- """User model inference class."""
-
- def __init__(self) -> None:
- """
- Load inference time artifacts that are called frequently .e.g. models, tokenizers, etc.
- in this method so they are loaded only once for faster inference.
- """
- self.base_path: Path = os.path.dirname(__file__)
- self.huggingface_model_path = os.path.join(self.base_path, "checkpoint")
- self.processor = ViTImageProcessor.from_pretrained(self.huggingface_model_path)
- self.model = AutoModel.from_pretrained(self.huggingface_model_path)
-
- @config.inference.wrap_func
- def get_predictions(self, input_data):
- """
- Main model inference method.
-
- Args:
- -----
- input_data: A single input data item to predict on.
- Input data can be an image or text, etc depending on the model type.
-
- Returns:
- --------
- One of the clarifai.models.model_serving.models.output types. Refer to the README/docs
- """
- outputs = []
- inputs = self.processor(images=input_data, return_tensors="pt")
- with torch.no_grad():
- embedding_vectors = self.model(**inputs).last_hidden_state[:, 0].cpu().numpy()
- for embedding_vector in embedding_vectors:
- outputs.append(EmbeddingOutput(embedding_vector=embedding_vector))
-
- return outputs