clarifai 9.11.1__py3-none-any.whl → 10.0.0__py3-none-any.whl

This diff shows the changes between publicly available package versions as published to their respective public registries, and is provided for informational purposes only.
Files changed (99)
  1. clarifai/client/input.py +34 -1
  2. clarifai/client/workflow.py +6 -2
  3. clarifai/constants/rag.py +1 -0
  4. clarifai/models/model_serving/README.md +1 -1
  5. clarifai/models/model_serving/models/default_test.py +3 -0
  6. clarifai/rag/__init__.py +3 -0
  7. clarifai/rag/rag.py +261 -0
  8. clarifai/rag/utils.py +102 -0
  9. clarifai/versions.py +1 -1
  10. {clarifai-9.11.1.dist-info → clarifai-10.0.0.dist-info}/METADATA +16 -3
  11. clarifai-10.0.0.dist-info/RECORD +103 -0
  12. clarifai/models/model_serving/examples/README.md +0 -7
  13. clarifai/models/model_serving/examples/image_classification/README.md +0 -12
  14. clarifai/models/model_serving/examples/image_classification/age_vit/1/__init__.py +0 -0
  15. clarifai/models/model_serving/examples/image_classification/age_vit/1/inference.py +0 -64
  16. clarifai/models/model_serving/examples/image_classification/age_vit/1/model.py +0 -74
  17. clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/README.md +0 -11
  18. clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/config.json +0 -42
  19. clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/preprocessor_config.json +0 -15
  20. clarifai/models/model_serving/examples/image_classification/age_vit/config.pbtxt +0 -23
  21. clarifai/models/model_serving/examples/image_classification/age_vit/labels.txt +0 -9
  22. clarifai/models/model_serving/examples/image_classification/age_vit/requirements.txt +0 -7
  23. clarifai/models/model_serving/examples/multimodal_embedder/README.md +0 -12
  24. clarifai/models/model_serving/examples/multimodal_embedder/clip/1/__init__.py +0 -0
  25. clarifai/models/model_serving/examples/multimodal_embedder/clip/1/inference.py +0 -66
  26. clarifai/models/model_serving/examples/multimodal_embedder/clip/1/model.py +0 -74
  27. clarifai/models/model_serving/examples/multimodal_embedder/clip/1/test.py +0 -64
  28. clarifai/models/model_serving/examples/multimodal_embedder/clip/config.pbtxt +0 -29
  29. clarifai/models/model_serving/examples/multimodal_embedder/clip/requirements.txt +0 -4
  30. clarifai/models/model_serving/examples/text_classification/README.md +0 -12
  31. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/__init__.py +0 -0
  32. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/inference.py +0 -62
  33. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/model.py +0 -74
  34. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/README.md +0 -12
  35. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/config.json +0 -34
  36. clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/special_tokens_map.json +0 -1
  37. clarifai/models/model_serving/examples/text_classification/xlm-roberta/config.pbtxt +0 -21
  38. clarifai/models/model_serving/examples/text_classification/xlm-roberta/labels.txt +0 -3
  39. clarifai/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +0 -7
  40. clarifai/models/model_serving/examples/text_embedding/README.md +0 -12
  41. clarifai/models/model_serving/examples/text_embedding/instructor-xl/1/__init__.py +0 -0
  42. clarifai/models/model_serving/examples/text_embedding/instructor-xl/1/inference.py +0 -63
  43. clarifai/models/model_serving/examples/text_embedding/instructor-xl/1/model.py +0 -74
  44. clarifai/models/model_serving/examples/text_embedding/instructor-xl/1/test.py +0 -64
  45. clarifai/models/model_serving/examples/text_embedding/instructor-xl/config.pbtxt +0 -20
  46. clarifai/models/model_serving/examples/text_embedding/instructor-xl/requirements.txt +0 -9
  47. clarifai/models/model_serving/examples/text_to_image/README.md +0 -10
  48. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/__init__.py +0 -0
  49. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/inference.py +0 -58
  50. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/model.py +0 -74
  51. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/config.pbtxt +0 -22
  52. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +0 -6
  53. clarifai/models/model_serving/examples/text_to_text/README.md +0 -12
  54. clarifai/models/model_serving/examples/text_to_text/bart-summarize/1/__init__.py +0 -0
  55. clarifai/models/model_serving/examples/text_to_text/bart-summarize/1/inference.py +0 -59
  56. clarifai/models/model_serving/examples/text_to_text/bart-summarize/1/model.py +0 -74
  57. clarifai/models/model_serving/examples/text_to_text/bart-summarize/config.pbtxt +0 -20
  58. clarifai/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +0 -4
  59. clarifai/models/model_serving/examples/visual_detection/Readme.md +0 -61
  60. clarifai/models/model_serving/examples/visual_detection/faster-rcnn_torchserve/1/inference.py +0 -96
  61. clarifai/models/model_serving/examples/visual_detection/faster-rcnn_torchserve/1/model.py +0 -74
  62. clarifai/models/model_serving/examples/visual_detection/faster-rcnn_torchserve/1/model_store/hub/checkpoints/keep +0 -0
  63. clarifai/models/model_serving/examples/visual_detection/faster-rcnn_torchserve/1/test.py +0 -62
  64. clarifai/models/model_serving/examples/visual_detection/faster-rcnn_torchserve/config.pbtxt +0 -35
  65. clarifai/models/model_serving/examples/visual_detection/faster-rcnn_torchserve/labels.txt +0 -80
  66. clarifai/models/model_serving/examples/visual_detection/faster-rcnn_torchserve/requirements.txt +0 -3
  67. clarifai/models/model_serving/examples/visual_detection/yolof/1/config/yolof_r50_c5_8x8_1x_coco.py +0 -245
  68. clarifai/models/model_serving/examples/visual_detection/yolof/1/inference.py +0 -90
  69. clarifai/models/model_serving/examples/visual_detection/yolof/1/model.py +0 -74
  70. clarifai/models/model_serving/examples/visual_detection/yolof/1/test.py +0 -64
  71. clarifai/models/model_serving/examples/visual_detection/yolof/config.pbtxt +0 -36
  72. clarifai/models/model_serving/examples/visual_detection/yolof/labels.txt +0 -80
  73. clarifai/models/model_serving/examples/visual_detection/yolof/requirements.txt +0 -8
  74. clarifai/models/model_serving/examples/visual_embedding/README.md +0 -12
  75. clarifai/models/model_serving/examples/visual_embedding/vit-base/1/__init__.py +0 -0
  76. clarifai/models/model_serving/examples/visual_embedding/vit-base/1/inference.py +0 -56
  77. clarifai/models/model_serving/examples/visual_embedding/vit-base/1/model.py +0 -74
  78. clarifai/models/model_serving/examples/visual_embedding/vit-base/config.pbtxt +0 -22
  79. clarifai/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +0 -5
  80. clarifai/models/model_serving/examples/visual_segmentation/README.md +0 -12
  81. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/1/__init__.py +0 -0
  82. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/1/inference.py +0 -62
  83. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/1/model.py +0 -74
  84. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/config.pbtxt +0 -24
  85. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/labels.txt +0 -18
  86. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +0 -5
  87. clarifai/models/model_serving/examples/vllm/Readme.md +0 -12
  88. clarifai/models/model_serving/examples/vllm/example/1/__init__.py +0 -0
  89. clarifai/models/model_serving/examples/vllm/example/1/inference.py +0 -56
  90. clarifai/models/model_serving/examples/vllm/example/1/model.py +0 -74
  91. clarifai/models/model_serving/examples/vllm/example/1/test.py +0 -64
  92. clarifai/models/model_serving/examples/vllm/example/1/weights/keep +0 -0
  93. clarifai/models/model_serving/examples/vllm/example/config.pbtxt +0 -20
  94. clarifai/models/model_serving/examples/vllm/example/requirements.txt +0 -5
  95. clarifai-9.11.1.dist-info/RECORD +0 -182
  96. {clarifai-9.11.1.dist-info → clarifai-10.0.0.dist-info}/LICENSE +0 -0
  97. {clarifai-9.11.1.dist-info → clarifai-10.0.0.dist-info}/WHEEL +0 -0
  98. {clarifai-9.11.1.dist-info → clarifai-10.0.0.dist-info}/entry_points.txt +0 -0
  99. {clarifai-9.11.1.dist-info → clarifai-10.0.0.dist-info}/top_level.txt +0 -0
clarifai/models/model_serving/examples/text_to_text/bart-summarize/1/model.py
@@ -1,74 +0,0 @@
- # Copyright 2023 Clarifai, Inc.
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #      http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """Triton inference server Python Backend Model."""
-
- import os
- import sys
-
- try:
-   import triton_python_backend_utils as pb_utils
- except ModuleNotFoundError:
-   pass
- from google.protobuf import text_format
- from tritonclient.grpc.model_config_pb2 import ModelConfig
- from clarifai.models.model_serving.model_config.inference_parameter import parse_req_parameters
-
-
- class TritonPythonModel:
-   """
-   Triton Python BE Model.
-   """
-
-   def initialize(self, args):
-     """
-     Triton server init.
-     """
-     args["model_repository"] = args["model_repository"].replace("/1/model.py", "")
-     sys.path.append(os.path.dirname(__file__))
-     from inference import InferenceModel
-
-     self.inference_obj = InferenceModel()
-
-     # Read input_name from config file
-     self.config_msg = ModelConfig()
-     with open(os.path.join(args["model_repository"], "config.pbtxt"), "r") as f:
-       cfg = f.read()
-     text_format.Merge(cfg, self.config_msg)
-     self.input_names = [inp.name for inp in self.config_msg.input]
-
-   def execute(self, requests):
-     """
-     Serve model inference requests.
-     """
-     responses = []
-
-     for request in requests:
-       parameters = request.parameters()
-       parameters = parse_req_parameters(parameters) if parameters else {}
-
-       if len(self.input_names) == 1:
-         in_batch = pb_utils.get_input_tensor_by_name(request, self.input_names[0])
-         in_batch = in_batch.as_numpy()
-         inference_response = self.inference_obj.get_predictions(in_batch, **parameters)
-       else:
-         multi_in_batch_dict = {}
-         for input_name in self.input_names:
-           in_batch = pb_utils.get_input_tensor_by_name(request, input_name)
-           in_batch = in_batch.as_numpy() if in_batch is not None else []
-           multi_in_batch_dict.update({input_name: in_batch})
-
-         inference_response = self.inference_obj.get_predictions(multi_in_batch_dict, **parameters)
-
-       responses.append(inference_response)
-
-     return responses
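The wrapper above dynamically imports an `InferenceModel` class from the sibling `inference.py` and routes every request batch through its `get_predictions` method (the faster-rcnn `inference.py` later in this diff shows the full pattern). Below is a rough sketch of what that contract could look like for the bart-summarize example; the `ModelTypes.text_to_text` attribute, the Hugging Face pipeline choice, the decoding of Triton string tensors, and the `return_type` call are assumptions for illustration, not the package's verbatim example.

```python
"""Hypothetical inference.py sketch matching the wrapper's expected interface."""
from clarifai.models.model_serving.model_config import ModelTypes, get_model_config

config = get_model_config(ModelTypes.text_to_text)  # model-type name is an assumption


class InferenceModel:
  """User model inference class."""

  def __init__(self) -> None:
    # Load heavyweight artifacts once at server start, not per request.
    from transformers import pipeline
    self.pipe = pipeline("summarization", model="facebook/bart-large-cnn")

  @config.inference.wrap_func
  def get_predictions(self, input_data: list, **kwargs) -> list:
    outputs = []
    for item in input_data:
      # Triton TYPE_STRING batches typically arrive as bytes; decoding is an assumption.
      text = item[0].decode("utf-8") if isinstance(item[0], bytes) else str(item[0])
      summary = self.pipe(text)[0]["summary_text"]
      outputs.append(config.inference.return_type(summary))
    return outputs
```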
clarifai/models/model_serving/examples/text_to_text/bart-summarize/config.pbtxt
@@ -1,20 +0,0 @@
- name: "bart-summarize"
- max_batch_size: 1
- input {
-   name: "text"
-   data_type: TYPE_STRING
-   dims: 1
- }
- output {
-   name: "text"
-   data_type: TYPE_STRING
-   dims: 1
- }
- instance_group {
-   count: 1
-   kind: KIND_GPU
- }
- dynamic_batching {
-   max_queue_delay_microseconds: 500
- }
- backend: "python"
clarifai/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt
@@ -1,4 +0,0 @@
- clarifai>9.10.5
- tritonclient[all]
- torch==1.13.1
- transformers==4.30.2
clarifai/models/model_serving/examples/visual_detection/Readme.md
@@ -1,61 +0,0 @@
- ## Visual Detection Triton Model Examples
- These can be used on the fly with minimal or no changes to test deploying visual detection models to the Clarifai platform. See the required files section for each model below.
-
- ## [YOLOF](https://github.com/open-mmlab/mmdetection/tree/v3.0.0rc3/configs/yolof)
-
- Requirements to run tests locally:
-
- Download the checkpoint and save it in yolof/1/config/:
- ```bash
- wget -P yolof/1/config https://download.openmmlab.com/mmdetection/v2.0/yolof/yolof_r50_c5_8x8_1x_coco/yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth
- ```
- Install dependencies:
- ```bash
- pip install -r yolof/requirements.txt
- ```
- Test locally:
- ```bash
- pip install pytest
- python -m pytest -s yolof/1/test.py
- ```
- Zip it and upload to Clarifai:
- ```bash
- clarifai-triton-zip --triton_model_repository yolof --zipfile_name yolof.zip
- # then upload your zip to the cloud and obtain a url
- clarifai-upload-model --model_type visual-detector --model_id <your model id> --url <url>
- ```
-
- ## TorchServe model format: [Faster-RCNN](https://github.com/pytorch/serve/tree/master/examples/object_detector/fast-rcnn)
- To use a TorchServe model (a .mar file created by torch-model-archiver, essentially a zip file containing the model checkpoint, Python code, and other components) within this module, follow these steps:
-
- 1. Unzip the .mar file to obtain your checkpoint.
- 2. Implement your postprocess method in inference.py.
-
- For example, for faster-rcnn, suppose you already have a .mar file built following the TorchServe example.
-
- Unzip it into ./faster-rcnn_torchserve/1/model_store/:
- ```bash
- unzip faster_rcnn.mar -d ./faster-rcnn_torchserve/1/model_store/
- ```
-
- ```bash
- # in model_store you will have
- model_store/
- ├── MAR-INF
- │   └── MANIFEST.json
- ├── model.py
- └── fasterrcnn_resnet50_fpn_coco-258fb6c6.pth
- ```
- ```bash
- # then relocate the checkpoint to ./faster-rcnn_torchserve/1/model_store/hub/checkpoints,
- # as the Torch cache is configured to use this folder in inference.py.
- mv ./faster-rcnn_torchserve/1/model_store/fasterrcnn_resnet50_fpn_coco-258fb6c6.pth ./faster-rcnn_torchserve/1/model_store/hub/checkpoints/
- ```
-
- Test, zip, and upload the model:
- ```bash
- # zip
- clarifai-triton-zip --triton_model_repository faster-rcnn_torchserve --zipfile_name faster-rcnn_torchserve.zip
- # then upload your zip to the cloud and obtain a url
- clarifai-upload-model --model_type visual-detector --model_id <your model id> --url <url>
- ```
clarifai/models/model_serving/examples/visual_detection/faster-rcnn_torchserve/1/inference.py
@@ -1,96 +0,0 @@
- # This file contains boilerplate code to allow users to write their model
- # inference code that will then interact with the Triton Inference Server
- # Python backend to serve end user requests.
- # The module name, module path, class name & get_predictions() method names MUST be maintained as is,
- # but other methods may be added within the class as deemed fit, provided
- # they are invoked within the main get_predictions() inference method
- # if they play a role in any step of model inference.
- """User model inference script."""
-
- import os
-
- ROOT = os.path.dirname(__file__)
- os.environ['TORCH_HOME'] = os.path.join(ROOT, "model_store")
-
- from pathlib import Path  # noqa: E402
- import numpy as np  # noqa: E402
- import torch  # noqa: E402
- from PIL import Image  # noqa: E402
- from torchvision import models, transforms  # noqa: E402
-
- from clarifai.models.model_serving.model_config import ModelTypes, get_model_config  # noqa: E402
- from clarifai.models.model_serving.models.output import VisualDetectorOutput  # noqa: E402
-
- config = get_model_config(ModelTypes.visual_detector)
-
-
- class InferenceModel:
-   """User model inference class."""
-
-   def __init__(self) -> None:
-     """
-     Load inference-time artifacts that are called frequently, e.g. models, tokenizers, etc.,
-     in this method so they are loaded only once for faster inference.
-     """
-     self.base_path: Path = os.path.dirname(__file__)
-     # self.checkpoint = os.path.join(ROOT, "model_store/hub/checkpoints/fasterrcnn_resnet50_fpn_coco-258fb6c6.pth")
-     self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
-
-     self.transform = transforms.Compose([
-         transforms.ToTensor(),
-     ])
-     self.model = models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
-     self.model = self.model.to(self.device)
-     self.model.eval()
-
-   @config.inference.wrap_func
-   def get_predictions(self, input_data: list, **kwargs) -> list:
-     """
-     Main model inference method.
-
-     Args:
-     -----
-       input_data: A list of input data items to predict on.
-         Input data can be an image or text, etc., depending on the model type.
-
-       **kwargs: your inference parameters.
-
-     Returns:
-     --------
-       List of one of the `clarifai.models.model_serving.models.output` types or `config.inference.return_type(your_output)`. Refer to the README/docs.
-     """
-     max_bbox_count = 300  # max allowed detected bounding boxes per image
-     outputs = []
-
-     if isinstance(input_data, np.ndarray) and len(input_data.shape) == 4:
-       input_data = list(input_data)
-
-     input_tensor = [self.transform(Image.fromarray(each)) for each in input_data]
-     input_tensor = torch.stack(input_tensor).to(self.device)
-
-     with torch.no_grad():
-       predictions = self.model(input_tensor)
-
-     for inp_data, preds in zip(input_data, predictions):
-       boxes = preds["boxes"].cpu().numpy()
-       labels = [[pred] for pred in preds["labels"].detach().cpu().numpy()]
-       scores = [[pred] for pred in preds["scores"].detach().cpu().numpy()]
-       h, w, _ = inp_data.shape  # input image shape
-       bboxes = [[x[1] / h, x[0] / w, x[3] / h, x[2] / w]
-                 for x in boxes]  # normalize the bboxes to [0, 1]
-       bboxes = np.clip(bboxes, 0, 1)
-       if len(bboxes) != 0:
-         bboxes = np.concatenate((bboxes, np.zeros((max_bbox_count - len(bboxes), 4))))
-         scores = np.concatenate((scores, np.zeros((max_bbox_count - len(scores), 1))))
-         labels = np.concatenate((labels, np.zeros(
-             (max_bbox_count - len(labels), 1), dtype=np.int32)))
-       else:
-         bboxes = np.zeros((max_bbox_count, 4), dtype=np.float32)
-         scores = np.zeros((max_bbox_count, 1), dtype=np.float32)
-         labels = np.zeros((max_bbox_count, 1), dtype=np.int32)
-
-       outputs.append(
-           VisualDetectorOutput(
-               predicted_bboxes=bboxes, predicted_labels=labels, predicted_scores=scores))
-
-     return outputs
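To make the coordinate handling above concrete: torchvision returns pixel-space `[x1, y1, x2, y2]` boxes, while `VisualDetectorOutput` is fed normalized `[y1/h, x1/w, y2/h, x2/w]` rows padded to a fixed `max_bbox_count`. A standalone sketch of just that step, with illustrative numbers:

```python
import numpy as np

h, w = 480, 640  # input image height and width (illustrative values)
boxes = np.array([[64., 48., 320., 240.]])  # one box as [x1, y1, x2, y2] in pixels

# Reorder to [y1, x1, y2, x2] and scale into [0, 1], as in get_predictions() above.
bboxes = np.clip([[x[1] / h, x[0] / w, x[3] / h, x[2] / w] for x in boxes], 0, 1)
print(bboxes)  # [[0.1 0.1 0.5 0.5]]

# Zero-pad to the fixed max_bbox_count so every image yields a constant-shape tensor.
max_bbox_count = 300
padded = np.concatenate((bboxes, np.zeros((max_bbox_count - len(bboxes), 4))))
print(padded.shape)  # (300, 4)
```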
clarifai/models/model_serving/examples/visual_detection/faster-rcnn_torchserve/1/model.py
@@ -1,74 +0,0 @@
- # Copyright 2023 Clarifai, Inc.
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #      http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """Triton inference server Python Backend Model."""
-
- import os
- import sys
-
- try:
-   import triton_python_backend_utils as pb_utils
- except ModuleNotFoundError:
-   pass
- from google.protobuf import text_format
- from tritonclient.grpc.model_config_pb2 import ModelConfig
- from clarifai.models.model_serving.model_config.inference_parameter import parse_req_parameters
-
-
- class TritonPythonModel:
-   """
-   Triton Python BE Model.
-   """
-
-   def initialize(self, args):
-     """
-     Triton server init.
-     """
-     args["model_repository"] = args["model_repository"].replace("/1/model.py", "")
-     sys.path.append(os.path.dirname(__file__))
-     from inference import InferenceModel
-
-     self.inference_obj = InferenceModel()
-
-     # Read input_name from config file
-     self.config_msg = ModelConfig()
-     with open(os.path.join(args["model_repository"], "config.pbtxt"), "r") as f:
-       cfg = f.read()
-     text_format.Merge(cfg, self.config_msg)
-     self.input_names = [inp.name for inp in self.config_msg.input]
-
-   def execute(self, requests):
-     """
-     Serve model inference requests.
-     """
-     responses = []
-
-     for request in requests:
-       parameters = request.parameters()
-       parameters = parse_req_parameters(parameters) if parameters else {}
-
-       if len(self.input_names) == 1:
-         in_batch = pb_utils.get_input_tensor_by_name(request, self.input_names[0])
-         in_batch = in_batch.as_numpy()
-         inference_response = self.inference_obj.get_predictions(in_batch, **parameters)
-       else:
-         multi_in_batch_dict = {}
-         for input_name in self.input_names:
-           in_batch = pb_utils.get_input_tensor_by_name(request, input_name)
-           in_batch = in_batch.as_numpy() if in_batch is not None else []
-           multi_in_batch_dict.update({input_name: in_batch})
-
-         inference_response = self.inference_obj.get_predictions(multi_in_batch_dict, **parameters)
-
-       responses.append(inference_response)
-
-     return responses
clarifai/models/model_serving/examples/visual_detection/faster-rcnn_torchserve/1/test.py
@@ -1,62 +0,0 @@
- import logging
- import os
- import unittest
-
- from clarifai.models.model_serving.models.default_test import DefaultTestInferenceModel
-
-
- class CustomTestInferenceModel(DefaultTestInferenceModel):
-   """
-   Run this file to test your implementation of InferenceModel in inference.py with default tests of the Triton configuration and its output values based on basic predefined inputs.
-   If you want to write a custom test case or just test output values,
-   please follow these instructions:
-   1. Name your test function with the prefix "test" so that pytest can execute it
-   2. To obtain the output of InferenceModel, call `self.triton_get_predictions(input_data)`.
-   3. If your input is `image` and you have set a custom size for it when building the model repository,
-   call `self.preprocess(image)` to obtain the correctly resized input
-   4. Run this test by calling
-   ```bash
-   pytest ./your_triton_folder/1/test.py
-   # to see std output
-   pytest --log-cli-level=INFO -s ./your_triton_folder/1/test.py
-   ```
-
-   ### Examples:
-   + test text-to-image output
-   ```
-   def test_text_to_image_output(self):
-     text = "Test text"
-     output = self.triton_get_predictions(text)
-     image = output.image  # uint8 np.ndarray image
-     # show or save
-   ```
-   + test visual-classifier output
-   ```
-   def test_visual_classifier(self):
-     image = cv2.imread("your/local/image.jpg")  # keep in mind the image format (BGR or RGB)
-     output = self.triton_get_predictions(image)
-     scores = output.predicted_scores  # np.ndarray
-     # process scores to get the class id and its score
-     logger.info(result)
-   """
-
-   # Insert your inference parameters json path here,
-   # or insert a dictionary of your_parameter_name and value, e.g. dict(x=1.5, y="text", c=True),
-   # or leave it as "" if you don't have it.
-   inference_parameters = ""
-
-   ########### Initialization. Do not change it ###########
-   __test__ = True
-
-   def setUp(self) -> None:
-     logging.info("Initializing...")
-     model_type = "visual-detector"  # your model type
-     self.intitialize(  # method name as spelled in DefaultTestInferenceModel
-         model_type,
-         repo_version_dir=os.path.dirname(__file__),
-         is_instance_kind_gpu=True,
-         inference_parameters=self.inference_parameters)
-
-
- if __name__ == '__main__':
-   unittest.main()
clarifai/models/model_serving/examples/visual_detection/faster-rcnn_torchserve/config.pbtxt
@@ -1,35 +0,0 @@
- max_batch_size: 1
- input {
-   name: "image"
-   data_type: TYPE_UINT8
-   dims: -1
-   dims: -1
-   dims: 3
- }
- output {
-   name: "predicted_bboxes"
-   data_type: TYPE_FP32
-   dims: -1
-   dims: 4
- }
- output {
-   name: "predicted_labels"
-   data_type: TYPE_INT32
-   dims: -1
-   dims: 1
-   label_filename: "labels.txt"
- }
- output {
-   name: "predicted_scores"
-   data_type: TYPE_FP32
-   dims: -1
-   dims: 1
- }
- instance_group {
-   count: 1
-   kind: KIND_GPU
- }
- dynamic_batching {
-   max_queue_delay_microseconds: 500
- }
- backend: "python"
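This config is what `model.py`'s `initialize()` parses at startup to discover the input names. The same two protobuf calls work standalone, as a quick sketch (the file path here is illustrative):

```python
from google.protobuf import text_format
from tritonclient.grpc.model_config_pb2 import ModelConfig

config_msg = ModelConfig()
with open("faster-rcnn_torchserve/config.pbtxt", "r") as f:  # illustrative path
  text_format.Merge(f.read(), config_msg)

# Field access mirrors what initialize() does with self.config_msg.
print([inp.name for inp in config_msg.input])   # ['image']
print([out.name for out in config_msg.output])  # ['predicted_bboxes', 'predicted_labels', 'predicted_scores']
print(config_msg.max_batch_size)                # 1
```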
clarifai/models/model_serving/examples/visual_detection/faster-rcnn_torchserve/labels.txt
@@ -1,80 +0,0 @@
- person
- bicycle
- car
- motorcycle
- airplane
- bus
- train
- truck
- boat
- traffic-light
- fire-hydrant
- stop-sign
- parking-meter
- bench
- bird
- cat
- dog
- horse
- sheep
- cow
- elephant
- bear
- zebra
- giraffe
- backpack
- umbrella
- handbag
- tie
- suitcase
- frisbee
- skis
- snowboard
- sports-ball
- kite
- baseball-bat
- baseball-glove
- skateboard
- surfboard
- tennis-racket
- bottle
- wine-glass
- cup
- fork
- knife
- spoon
- bowl
- banana
- apple
- sandwich
- orange
- broccoli
- carrot
- hot-dog
- pizza
- donut
- cake
- chair
- couch
- potted-plant
- bed
- dining-table
- toilet
- tv
- laptop
- mouse
- remote
- keyboard
- cell-phone
- microwave
- oven
- toaster
- sink
- refrigerator
- book
- clock
- vase
- scissors
- teddy-bear
- hair-drier
- toothbrush
clarifai/models/model_serving/examples/visual_detection/faster-rcnn_torchserve/requirements.txt
@@ -1,3 +0,0 @@
- tritonclient[all]
- clarifai>9.10.4
- torch==2.0.1