clarifai-9.10.1-py3-none-any.whl → clarifai-9.10.3-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- clarifai/client/__init__.py +3 -2
- clarifai/client/app.py +39 -23
- clarifai/client/base.py +6 -6
- clarifai/client/dataset.py +113 -55
- clarifai/client/input.py +47 -55
- clarifai/client/model.py +27 -25
- clarifai/client/module.py +13 -11
- clarifai/client/runner.py +5 -3
- clarifai/client/search.py +29 -10
- clarifai/client/user.py +14 -8
- clarifai/client/workflow.py +22 -20
- clarifai/constants/dataset.py +22 -0
- clarifai/datasets/upload/base.py +9 -7
- clarifai/datasets/upload/features.py +3 -3
- clarifai/datasets/upload/image.py +49 -50
- clarifai/datasets/upload/loaders/coco_captions.py +26 -80
- clarifai/datasets/upload/loaders/coco_detection.py +56 -115
- clarifai/datasets/upload/loaders/coco_segmentation.py +69 -137
- clarifai/datasets/upload/loaders/imagenet_classification.py +2 -3
- clarifai/datasets/upload/loaders/xview_detection.py +3 -3
- clarifai/datasets/upload/text.py +16 -16
- clarifai/datasets/upload/utils.py +196 -21
- clarifai/utils/misc.py +21 -0
- clarifai/versions.py +1 -1
- {clarifai-9.10.1.dist-info → clarifai-9.10.3.dist-info}/METADATA +3 -3
- clarifai-9.10.3.dist-info/RECORD +96 -0
- clarifai-9.10.3.dist-info/top_level.txt +1 -0
- clarifai/auth/__init__.py +0 -6
- clarifai/auth/helper.py +0 -367
- clarifai/auth/register.py +0 -23
- clarifai/auth/stub.py +0 -127
- clarifai/datasets/upload/examples/README.md +0 -31
- clarifai/datasets/upload/examples/image_classification/__init__.py +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/__init__.py +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/cifar_small_test.csv +0 -10
- clarifai/datasets/upload/examples/image_classification/cifar10/cifar_small_train.csv +0 -10
- clarifai/datasets/upload/examples/image_classification/cifar10/dataset.py +0 -46
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_700.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_701.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_702.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_703.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_704.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_705.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_706.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_707.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_708.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_709.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/__init__.py +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/dataset.py +0 -39
- clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/1420783.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/3287885.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/3617075.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/38052.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/39147.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/139558.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/1636096.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/2480925.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/3385808.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/3647386.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/1826869.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/2243245.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/259212.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/2842688.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/3035414.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/1545393.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/2427642.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/3520891.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/377566.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/503504.jpg +0 -0
- clarifai/datasets/upload/examples/text_classification/__init__.py +0 -0
- clarifai/datasets/upload/examples/text_classification/imdb_dataset/__init__.py +0 -0
- clarifai/datasets/upload/examples/text_classification/imdb_dataset/dataset.py +0 -42
- clarifai/datasets/upload/examples/text_classification/imdb_dataset/test.csv +0 -201
- clarifai/datasets/upload/examples/text_classification/imdb_dataset/train.csv +0 -201
- clarifai/datasets/upload/loaders/README.md +0 -49
- clarifai/models/model_serving/README.md +0 -155
- clarifai/models/model_serving/docs/custom_config.md +0 -33
- clarifai/models/model_serving/docs/dependencies.md +0 -11
- clarifai/models/model_serving/docs/inference_parameters.md +0 -134
- clarifai/models/model_serving/docs/model_types.md +0 -20
- clarifai/models/model_serving/docs/output.md +0 -28
- clarifai/models/model_serving/examples/README.md +0 -7
- clarifai/models/model_serving/examples/image_classification/README.md +0 -9
- clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/README.md +0 -11
- clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/config.json +0 -42
- clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/preprocessor_config.json +0 -15
- clarifai/models/model_serving/examples/image_classification/age_vit/config.pbtxt +0 -23
- clarifai/models/model_serving/examples/image_classification/age_vit/labels.txt +0 -9
- clarifai/models/model_serving/examples/image_classification/age_vit/requirements.txt +0 -7
- clarifai/models/model_serving/examples/text_classification/README.md +0 -9
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/README.md +0 -12
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/config.json +0 -34
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/special_tokens_map.json +0 -1
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/config.pbtxt +0 -21
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/labels.txt +0 -3
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +0 -7
- clarifai/models/model_serving/examples/text_embedding/README.md +0 -9
- clarifai/models/model_serving/examples/text_to_image/README.md +0 -9
- clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/__init__.py +0 -0
- clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/inference.py +0 -52
- clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/model.py +0 -60
- clarifai/models/model_serving/examples/text_to_image/sd-v1.5/config.pbtxt +0 -22
- clarifai/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +0 -6
- clarifai/models/model_serving/examples/text_to_text/README.md +0 -10
- clarifai/models/model_serving/examples/text_to_text/bart-summarize/config.pbtxt +0 -20
- clarifai/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +0 -4
- clarifai/models/model_serving/examples/visual_detection/README.md +0 -11
- clarifai/models/model_serving/examples/visual_detection/yolov5x/config.pbtxt +0 -36
- clarifai/models/model_serving/examples/visual_detection/yolov5x/labels.txt +0 -80
- clarifai/models/model_serving/examples/visual_detection/yolov5x/requirements.txt +0 -12
- clarifai/models/model_serving/examples/visual_embedding/README.md +0 -9
- clarifai/models/model_serving/examples/visual_embedding/vit-base/config.pbtxt +0 -22
- clarifai/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +0 -5
- clarifai/models/model_serving/examples/visual_segmentation/README.md +0 -9
- clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/config.pbtxt +0 -24
- clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/labels.txt +0 -18
- clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +0 -5
- clarifai/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +0 -24
- clarifai/models/model_serving/model_config/model_types_config/text-classifier.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/text-embedder.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml +0 -28
- clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +0 -18
- clarifai/modules/README.md +0 -5
- clarifai/modules/style.css +0 -217
- clarifai-9.10.1.dist-info/RECORD +0 -386
- clarifai-9.10.1.dist-info/top_level.txt +0 -2
- clarifai_utils/__init__.py +0 -0
- clarifai_utils/auth/__init__.py +0 -6
- clarifai_utils/auth/helper.py +0 -367
- clarifai_utils/auth/register.py +0 -23
- clarifai_utils/auth/stub.py +0 -127
- clarifai_utils/cli.py +0 -0
- clarifai_utils/client/__init__.py +0 -16
- clarifai_utils/client/app.py +0 -684
- clarifai_utils/client/auth/__init__.py +0 -4
- clarifai_utils/client/auth/helper.py +0 -367
- clarifai_utils/client/auth/register.py +0 -23
- clarifai_utils/client/auth/stub.py +0 -127
- clarifai_utils/client/base.py +0 -131
- clarifai_utils/client/dataset.py +0 -442
- clarifai_utils/client/input.py +0 -892
- clarifai_utils/client/lister.py +0 -54
- clarifai_utils/client/model.py +0 -575
- clarifai_utils/client/module.py +0 -94
- clarifai_utils/client/runner.py +0 -161
- clarifai_utils/client/search.py +0 -239
- clarifai_utils/client/user.py +0 -253
- clarifai_utils/client/workflow.py +0 -223
- clarifai_utils/constants/model.py +0 -4
- clarifai_utils/constants/search.py +0 -2
- clarifai_utils/datasets/__init__.py +0 -0
- clarifai_utils/datasets/export/__init__.py +0 -0
- clarifai_utils/datasets/export/inputs_annotations.py +0 -222
- clarifai_utils/datasets/upload/__init__.py +0 -0
- clarifai_utils/datasets/upload/base.py +0 -66
- clarifai_utils/datasets/upload/examples/README.md +0 -31
- clarifai_utils/datasets/upload/examples/image_classification/__init__.py +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/__init__.py +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/cifar_small_test.csv +0 -10
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/cifar_small_train.csv +0 -10
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/dataset.py +0 -46
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_700.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_701.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_702.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_703.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_704.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_705.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_706.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_707.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_708.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_709.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/__init__.py +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/dataset.py +0 -39
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/1420783.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/3287885.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/3617075.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/38052.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/39147.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/139558.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/1636096.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/2480925.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/3385808.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/3647386.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/1826869.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/2243245.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/259212.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/2842688.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/3035414.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/1545393.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/2427642.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/3520891.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/377566.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/503504.jpg +0 -0
- clarifai_utils/datasets/upload/examples/text_classification/__init__.py +0 -0
- clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/__init__.py +0 -0
- clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/dataset.py +0 -42
- clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/test.csv +0 -201
- clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/train.csv +0 -201
- clarifai_utils/datasets/upload/features.py +0 -44
- clarifai_utils/datasets/upload/image.py +0 -165
- clarifai_utils/datasets/upload/loaders/README.md +0 -49
- clarifai_utils/datasets/upload/loaders/__init__.py +0 -0
- clarifai_utils/datasets/upload/loaders/coco_captions.py +0 -103
- clarifai_utils/datasets/upload/loaders/coco_detection.py +0 -134
- clarifai_utils/datasets/upload/loaders/coco_segmentation.py +0 -166
- clarifai_utils/datasets/upload/loaders/imagenet_classification.py +0 -59
- clarifai_utils/datasets/upload/loaders/xview_detection.py +0 -148
- clarifai_utils/datasets/upload/text.py +0 -53
- clarifai_utils/datasets/upload/utils.py +0 -63
- clarifai_utils/errors.py +0 -89
- clarifai_utils/models/__init__.py +0 -0
- clarifai_utils/models/api.py +0 -283
- clarifai_utils/models/model_serving/README.md +0 -155
- clarifai_utils/models/model_serving/__init__.py +0 -12
- clarifai_utils/models/model_serving/cli/__init__.py +0 -12
- clarifai_utils/models/model_serving/cli/deploy_cli.py +0 -123
- clarifai_utils/models/model_serving/cli/model_zip.py +0 -61
- clarifai_utils/models/model_serving/cli/repository.py +0 -87
- clarifai_utils/models/model_serving/constants.py +0 -1
- clarifai_utils/models/model_serving/docs/custom_config.md +0 -33
- clarifai_utils/models/model_serving/docs/dependencies.md +0 -11
- clarifai_utils/models/model_serving/docs/inference_parameters.md +0 -134
- clarifai_utils/models/model_serving/docs/model_types.md +0 -20
- clarifai_utils/models/model_serving/docs/output.md +0 -28
- clarifai_utils/models/model_serving/examples/README.md +0 -7
- clarifai_utils/models/model_serving/examples/image_classification/README.md +0 -9
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/inference.py +0 -56
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/model.py +0 -61
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/README.md +0 -11
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/config.json +0 -42
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/preprocessor_config.json +0 -15
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/config.pbtxt +0 -23
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/labels.txt +0 -9
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/requirements.txt +0 -7
- clarifai_utils/models/model_serving/examples/text_classification/README.md +0 -9
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/inference.py +0 -55
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/model.py +0 -61
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/README.md +0 -12
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/config.json +0 -34
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/special_tokens_map.json +0 -1
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/config.pbtxt +0 -21
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/labels.txt +0 -3
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +0 -7
- clarifai_utils/models/model_serving/examples/text_embedding/README.md +0 -9
- clarifai_utils/models/model_serving/examples/text_to_image/README.md +0 -9
- clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/inference.py +0 -52
- clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/model.py +0 -60
- clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/config.pbtxt +0 -22
- clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +0 -6
- clarifai_utils/models/model_serving/examples/text_to_text/README.md +0 -10
- clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/inference.py +0 -47
- clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/model.py +0 -60
- clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/config.pbtxt +0 -20
- clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +0 -4
- clarifai_utils/models/model_serving/examples/visual_detection/README.md +0 -11
- clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/1/inference.py +0 -72
- clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/1/model.py +0 -61
- clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/config.pbtxt +0 -36
- clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/labels.txt +0 -80
- clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/requirements.txt +0 -12
- clarifai_utils/models/model_serving/examples/visual_embedding/README.md +0 -9
- clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/inference.py +0 -51
- clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/model.py +0 -60
- clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/config.pbtxt +0 -22
- clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +0 -5
- clarifai_utils/models/model_serving/examples/visual_segmentation/README.md +0 -9
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/inference.py +0 -55
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/model.py +0 -60
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/config.pbtxt +0 -24
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/labels.txt +0 -18
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +0 -5
- clarifai_utils/models/model_serving/model_config/__init__.py +0 -14
- clarifai_utils/models/model_serving/model_config/config.py +0 -302
- clarifai_utils/models/model_serving/model_config/inference_parameter.py +0 -124
- clarifai_utils/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +0 -24
- clarifai_utils/models/model_serving/model_config/model_types_config/text-classifier.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/text-embedder.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/text-to-image.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/text-to-text.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/visual-classifier.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/visual-detector.yaml +0 -28
- clarifai_utils/models/model_serving/model_config/model_types_config/visual-embedder.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/serializer.py +0 -134
- clarifai_utils/models/model_serving/models/__init__.py +0 -12
- clarifai_utils/models/model_serving/models/default_test.py +0 -275
- clarifai_utils/models/model_serving/models/inference.py +0 -42
- clarifai_utils/models/model_serving/models/model_types.py +0 -265
- clarifai_utils/models/model_serving/models/output.py +0 -124
- clarifai_utils/models/model_serving/models/pb_model.py +0 -74
- clarifai_utils/models/model_serving/models/test.py +0 -64
- clarifai_utils/models/model_serving/pb_model_repository.py +0 -101
- clarifai_utils/modules/README.md +0 -5
- clarifai_utils/modules/__init__.py +0 -0
- clarifai_utils/modules/css.py +0 -60
- clarifai_utils/modules/pages.py +0 -42
- clarifai_utils/modules/style.css +0 -217
- clarifai_utils/runners/__init__.py +0 -0
- clarifai_utils/runners/example.py +0 -33
- clarifai_utils/schema/search.py +0 -69
- clarifai_utils/urls/helper.py +0 -103
- clarifai_utils/utils/__init__.py +0 -0
- clarifai_utils/utils/logging.py +0 -90
- clarifai_utils/utils/misc.py +0 -33
- clarifai_utils/utils/model_train.py +0 -157
- clarifai_utils/versions.py +0 -6
- clarifai_utils/workflows/__init__.py +0 -0
- clarifai_utils/workflows/export.py +0 -68
- clarifai_utils/workflows/utils.py +0 -59
- clarifai_utils/workflows/validate.py +0 -67
- {clarifai-9.10.1.dist-info → clarifai-9.10.3.dist-info}/LICENSE +0 -0
- {clarifai-9.10.1.dist-info → clarifai-9.10.3.dist-info}/WHEEL +0 -0
- {clarifai-9.10.1.dist-info → clarifai-9.10.3.dist-info}/entry_points.txt +0 -0
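
The headline change in this release is packaging cleanup: the entire duplicated `clarifai_utils/` tree is removed, so `top_level.txt` drops from two top-level packages to one. A quick, illustrative way to verify this against an installed copy of each version (a sketch, assuming a plain `pip install` of each wheel):

```python
# Sketch: check which top-level packages the installed wheel provides.
# With 9.10.1 both specs resolve; after upgrading to 9.10.3 only `clarifai`
# should, since every clarifai_utils/* file is deleted in this release.
import importlib.util

print(importlib.util.find_spec("clarifai") is not None)    # True on both versions
print(importlib.util.find_spec("clarifai_utils") is None)  # True only on 9.10.3
```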
clarifai_utils/models/model_serving/model_config/config.py DELETED

@@ -1,302 +0,0 @@
-# Copyright 2023 Clarifai, Inc.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-""" Model Config classes."""
-
-from dataclasses import asdict, dataclass, field
-from typing import List
-
-import yaml
-
-from ..models.model_types import *  # noqa # pylint: disable=unused-import
-from ..models.output import *  # noqa # pylint: disable=unused-import
-
-__all__ = ["get_model_config", "MODEL_TYPES", "TritonModelConfig", "ModelTypes"]
-
-### Triton Model Config classes.###
-
-
-@dataclass
-class DType:
-  """
-  Triton Model Config data types.
-  """
-  # https://github.com/triton-inference-server/common/blob/main/protobuf/model_config.proto
-  TYPE_UINT8: int = 2
-  TYPE_INT8: int = 6
-  TYPE_INT16: int = 7
-  TYPE_INT32: int = 8
-  TYPE_INT64: int = 9
-  TYPE_FP16: int = 10
-  TYPE_FP32: int = 11
-  TYPE_STRING: int = 13
-  KIND_GPU: int = 1
-  KIND_CPU: int = 2
-
-
-@dataclass
-class InputConfig:
-  """
-  Triton Input definition.
-  Params:
-  -------
-  name: input name
-  data_type: input data type
-  dims: Pre-defined input data shape(s).
-
-  Returns:
-  --------
-  InputConfig
-  """
-  name: str
-  data_type: int
-  dims: List = field(default_factory=list)
-  optional: bool = False
-
-
-@dataclass
-class OutputConfig:
-  """
-  Triton Output definition.
-  Params:
-  -------
-  name: output name
-  data_type: output data type
-  dims: Pre-defined output data shape(s).
-  labels (bool): If labels file is required for inference.
-
-  Returns:
-  --------
-  OutputConfig
-  """
-  name: str
-  data_type: int
-  dims: List = field(default_factory=list)
-  labels: bool = False
-
-  def __post_init__(self):
-    if self.labels:
-      self.label_filename = "labels.txt"
-    del self.labels
-
-
-@dataclass
-class Device:
-  """
-  Triton instance_group.
-  Define the type of inference device and number of devices to use.
-  Params:
-  -------
-  count: number of devices
-  use_gpu: whether to use cpu or gpu.
-
-  Returns:
-  --------
-  Device object
-  """
-  count: int = 1
-  use_gpu: bool = True
-
-  def __post_init__(self):
-    if self.use_gpu:
-      self.kind: str = DType.KIND_GPU
-    else:
-      self.kind: str = DType.KIND_CPU
-
-
-@dataclass
-class DynamicBatching:
-  """
-  Triton dynamic_batching config.
-  Params:
-  -------
-  preferred_batch_size: batch size
-  max_queue_delay_microseconds: max queue delay for a request batch
-
-  Returns:
-  --------
-  DynamicBatching object
-  """
-  #preferred_batch_size: List[int] = [1] # recommended not to set
-  max_queue_delay_microseconds: int = 500
-
-
-@dataclass
-class TritonModelConfig:
-  """
-  Triton Model Config base.
-  Params:
-  -------
-  name: triton inference model name
-  input: a list of an InputConfig field
-  output: a list of OutputConfig fields/dicts
-  instance_group: Device. see Device
-  dynamic_batching: Triton dynamic batching settings.
-  max_batch_size: max request batch size
-  backend: Triton Python Backend. Constant
-
-  Returns:
-  --------
-  TritonModelConfig
-  """
-  model_type: str
-  model_name: str
-  model_version: str
-  image_shape: List  #(H, W)
-  input: List[InputConfig] = field(default_factory=list)
-  output: List[OutputConfig] = field(default_factory=list)
-  instance_group: Device = field(default_factory=Device)
-  dynamic_batching: DynamicBatching = field(default_factory=DynamicBatching)
-  max_batch_size: int = 1
-  backend: str = "python"
-
-  def __post_init__(self):
-    if "image" in [each.name for each in self.input]:
-      image_dims = self.image_shape
-      image_dims.append(3)  # add channel dim
-      self.input[0].dims = image_dims
-
-
-### General Model Config classes & functions ###
-
-
-# Clarifai model types
-@dataclass
-class ModelTypes:
-  visual_detector: str = "visual-detector"
-  visual_classifier: str = "visual-classifier"
-  text_classifier: str = "text-classifier"
-  text_to_text: str = "text-to-text"
-  text_embedder: str = "text-embedder"
-  text_to_image: str = "text-to-image"
-  visual_embedder: str = "visual-embedder"
-  visual_segmenter: str = "visual-segmenter"
-  multimodal_embedder: str = "multimodal-embedder"
-
-  def __post_init__(self):
-    self.all = list(asdict(self).values())
-
-
-@dataclass
-class InferenceConfig:
-  wrap_func: callable
-  return_type: dataclass
-
-
-@dataclass
-class FieldMapsConfig:
-  input_fields_map: dict
-  output_fields_map: dict
-
-
-@dataclass
-class DefaultTritonConfig:
-  input: List[InputConfig] = field(default_factory=list)
-  output: List[OutputConfig] = field(default_factory=list)
-
-
-@dataclass
-class ModelConfigClass:
-  type: str = field(init=False)
-  triton: DefaultTritonConfig
-  inference: InferenceConfig
-  field_maps: FieldMapsConfig
-
-  def make_triton_model_config(
-      self,
-      model_name: str,
-      model_version: str,
-      image_shape: List = None,
-      instance_group: Device = Device(),
-      dynamic_batching: DynamicBatching = DynamicBatching(),
-      max_batch_size: int = 1,
-      backend: str = "python",
-  ) -> TritonModelConfig:
-
-    return TritonModelConfig(
-        model_type=self.type,
-        model_name=model_name,
-        model_version=model_version,
-        image_shape=image_shape,
-        instance_group=instance_group,
-        dynamic_batching=dynamic_batching,
-        max_batch_size=max_batch_size,
-        backend=backend,
-        input=self.triton.input,
-        output=self.triton.output)
-
-
-def read_config(cfg: str):
-  with open(cfg, encoding="utf-8") as f:
-    config = yaml.safe_load(f)  # model dict
-
-  # parse default triton
-  input_triton_configs = config["triton"]["input"]
-  output_triton_configs = config["triton"]["output"]
-  triton = DefaultTritonConfig(
-      input=[
-          InputConfig(
-              name=input["name"],
-              data_type=eval(f"DType.{input['data_type']}"),
-              dims=input["dims"],
-              optional=input.get("optional", False),
-          ) for input in input_triton_configs
-      ],
-      output=[
-          OutputConfig(
-              name=output["name"],
-              data_type=eval(f"DType.{output['data_type']}"),
-              dims=output["dims"],
-              labels=output["labels"],
-          ) for output in output_triton_configs
-      ])
-
-  # parse inference config
-  inference = InferenceConfig(
-      wrap_func=eval(config["inference"]["wrap_func"]),
-      return_type=eval(config["inference"]["return_type"]),
-  )
-
-  # parse field maps for deployment
-  field_maps = FieldMapsConfig(**config["field_maps"])
-
-  return ModelConfigClass(triton=triton, inference=inference, field_maps=field_maps)
-
-
-def get_model_config(model_type: str) -> ModelConfigClass:
-  """
-  Get model config by model type
-
-  Args:
-
-    model_type (str): One of field value of ModelTypes
-
-  Return:
-    ModelConfigClass
-
-  ### Example:
-  >>> cfg = get_model_config(ModelTypes.text_classifier)
-  >>> custom_triton_config = cfg.make_triton_model_config(**kwargs)
-
-
-  """
-  import os
-  assert model_type in MODEL_TYPES, f"`model_type` must be in {MODEL_TYPES}"
-  cfg = read_config(
-      os.path.join(os.path.dirname(__file__), "model_types_config", f"{model_type}.yaml"))
-  cfg.type = model_type
-  return cfg
-
-
-_model_types = ModelTypes()
-MODEL_TYPES = _model_types.all
-del _model_types
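
For reference, the deleted module's own docstring shows how this API was driven; a minimal usage sketch, assuming the pre-9.10.3 `clarifai_utils` layout is still importable (the model name and version below are made up):

```python
# Usage reconstructed from the get_model_config() docstring above; the
# import path assumes the old clarifai_utils package layout.
from clarifai_utils.models.model_serving.model_config.config import (
    ModelTypes, get_model_config)

cfg = get_model_config(ModelTypes.text_classifier)  # loads text-classifier.yaml
triton_cfg = cfg.make_triton_model_config(
    model_name="my-text-classifier",  # hypothetical model name
    model_version="1",
    max_batch_size=4)
print(triton_cfg.backend)  # "python" (the constant Python backend)
```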
clarifai_utils/models/model_serving/model_config/inference_parameter.py DELETED

@@ -1,124 +0,0 @@
-import json
-from dataclasses import asdict, dataclass, field
-from typing import Any, List
-
-
-@dataclass(frozen=True)
-class InferParamType:
-  BOOL: int = 1
-  STRING: int = 2
-  NUMBER: int = 3
-  ENCRYPTED_STRING: int = 21
-
-
-@dataclass
-class InferParam:
-  path: str
-  field_type: InferParamType = field(default_factory=InferParamType)
-  default_value: Any = None
-  description: str = ""
-
-  def __post_init__(self):
-    assert self.path.isidentifier(
-    ), f"`path` must be valid for creating python variable, got {self.path}"
-    if self.default_value is not None:
-      self.validate_type(self.default_value)
-
-  def validate_type(self, value):
-    if self.field_type == InferParamType.BOOL:
-      assert isinstance(value, bool), f"`field_type` is `BOOL` (bool), however got {type(value)}"
-    elif self.field_type == InferParamType.NUMBER:
-      assert isinstance(value, float) or isinstance(
-          value, int), f"`field_type` is `NUMBER` (float or int), however got {type(value)}"
-    else:
-      assert isinstance(
-          value,
-          str), f"`field_type` is `STRING` or `ENCRYPTED_STRING` (str), however got {type(value)}"
-
-  def todict(self):
-    return {k: v for k, v in asdict(self).items()}
-
-
-@dataclass
-class InferParamManager:
-  json_path: str = ""
-  params: List[InferParam] = field(default_factory=list)
-  _dict_params: dict = field(init=False)
-
-  @classmethod
-  def from_kwargs(cls, **kwargs):
-    params = list()
-    for k, v in kwargs.items():
-      if isinstance(v, str) and k.startswith("_"):
-        _type = InferParamType.ENCRYPTED_STRING
-      elif isinstance(v, str):
-        _type = InferParamType.STRING
-      elif isinstance(v, bool):
-        _type = InferParamType.BOOL
-      elif isinstance(v, float) or isinstance(v, int):
-        _type = InferParamType.NUMBER
-      else:
-        raise TypeError(f"Unsupported type {type(v)} of argument {k}, support {InferParamType}")
-      param = InferParam(path=k, field_type=_type, default_value=v, description="")
-      params.append(param)
-
-    return cls(params=params)
-
-  def __post_init__(self):
-    #assert self.params == [] or self.json_path, "`json_path` or `params` must be set"
-    self._dict_params = dict()
-    if self.params == [] and self.json_path:
-      with open(self.json_path, "r") as fp:
-        objs = json.load(fp)
-      objs = objs if isinstance(objs, list) else [objs]
-      self.params = [InferParam(**obj) for obj in objs]
-    for param in self.params:
-      self._dict_params.update({param.path: param})
-
-  def get_list_params(self):
-    list_params = []
-    for each in self.params:
-      list_params.append(each.todict())
-    return list_params
-
-  def export(self, path: str):
-    list_params = self.get_list_params()
-    with open(path, "w") as fp:
-      json.dump(list_params, fp, indent=2)
-
-  def validate(self, **kwargs) -> dict:
-    output_kwargs = {k: v.default_value for k, v in self._dict_params.items()}
-    assert kwargs == {} or self.params != [], "kwargs are rejected since `params` is empty"
-
-    for key, value in kwargs.items():
-      assert key in self._dict_params, f"param `{key}` is not in setting: {list(self._dict_params.keys())}"
-      if key in self._dict_params:
-        self._dict_params[key].validate_type(value)
-        output_kwargs.update({key: value})
-    return output_kwargs
-
-
-def is_number(v: str):
-  try:
-    _ = float(v)
-    return True
-  except ValueError:
-    return False
-
-
-def str_to_number(v: str):
-  try:
-    return int(v)
-  except ValueError:
-    return float(v)
-
-
-def parse_req_parameters(req_params: str):
-  req_params = json.loads(req_params)
-  for k, v in req_params.items():
-    if isinstance(v, str):
-      if is_number(v):
-        v = str_to_number(v)
-      req_params.update({k: v})
-
-  return req_params
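
The deleted `InferParamManager` inferred a parameter's type from each keyword's Python type, treating `str` values whose names start with `_` as encrypted. A sketch of that behavior with made-up parameter names (the import assumes the old layout):

```python
# Behavior inferred from the deleted code above; parameter names are examples.
from clarifai_utils.models.model_serving.model_config.inference_parameter import (
    InferParamManager)

manager = InferParamManager.from_kwargs(temperature=0.7, top_k=40, _api_key="secret")
print([p.field_type for p in manager.params])  # [3, 3, 21] = NUMBER, NUMBER, ENCRYPTED_STRING

kwargs = manager.validate(temperature=0.9)  # override one default, keep the rest
print(kwargs)  # {'temperature': 0.9, 'top_k': 40, '_api_key': 'secret'}

manager.export("params.json")  # writes the parameter specs as a JSON list
```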
clarifai_utils/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml DELETED

@@ -1,24 +0,0 @@
-triton:
-  input:
-  - name: image
-    data_type: TYPE_UINT8
-    dims: [-1, -1, 3]
-    optional: true
-  - name: text
-    data_type: TYPE_STRING
-    dims: [1]
-    optional: true
-  output:
-  - name: embeddings
-    data_type: TYPE_FP32
-    dims: [-1]
-    labels: false
-inference:
-  wrap_func: multimodal_embedder
-  return_type: EmbeddingOutput
-field_maps:
-  input_fields_map:
-    image: image
-    text: text
-  output_fields_map:
-    embeddings: embeddings
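
These YAML files were consumed by `read_config()` in the config.py shown earlier. A sketch of what parsing the file above would yield, assuming the old layout and that the YAML sits at the illustrative path used below:

```python
# Illustrative parse of multimodal-embedder.yaml via the deleted read_config();
# the file path here is an assumption for the example.
from clarifai_utils.models.model_serving.model_config.config import read_config

cfg = read_config("model_types_config/multimodal-embedder.yaml")
print([i.name for i in cfg.triton.input])  # ['image', 'text'] (both marked optional)
print(cfg.field_maps.output_fields_map)    # {'embeddings': 'embeddings'}
```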
clarifai_utils/models/model_serving/model_config/model_types_config/text-classifier.yaml DELETED

@@ -1,18 +0,0 @@
-triton:
-  input:
-  - name: text
-    data_type: TYPE_STRING
-    dims: [1]
-  output:
-  - name: softmax_predictions
-    data_type: TYPE_FP32
-    dims: [-1]
-    labels: true
-inference:
-  wrap_func: text_classifier
-  return_type: ClassifierOutput
-field_maps:
-  input_fields_map:
-    text: text
-  output_fields_map:
-    concepts: softmax_predictions
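
Note how `labels: true` on `softmax_predictions` is consumed: `OutputConfig.__post_init__` in the deleted config.py swaps the flag for a `label_filename` attribute pointing at the model's labels.txt. A sketch, assuming `OutputConfig` and `DType` import from the old layout:

```python
# `labels: true` becomes a labels.txt reference on the Triton output,
# per OutputConfig.__post_init__ in the deleted config.py.
from clarifai_utils.models.model_serving.model_config.config import DType, OutputConfig

out = OutputConfig(
    name="softmax_predictions", data_type=DType.TYPE_FP32, dims=[-1], labels=True)
print(out.label_filename)  # 'labels.txt'
```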
clarifai_utils/models/model_serving/model_config/model_types_config/text-embedder.yaml DELETED

@@ -1,18 +0,0 @@
-triton:
-  input:
-  - name: text
-    data_type: TYPE_STRING
-    dims: [1]
-  output:
-  - name: embeddings
-    data_type: TYPE_FP32
-    dims: [-1]
-    labels: false
-inference:
-  wrap_func: text_embedder
-  return_type: EmbeddingOutput
-field_maps:
-  input_fields_map:
-    text: text
-  output_fields_map:
-    embeddings: embeddings
clarifai_utils/models/model_serving/model_config/model_types_config/text-to-image.yaml DELETED

@@ -1,18 +0,0 @@
-triton:
-  input:
-  - name: text
-    data_type: TYPE_STRING
-    dims: [1]
-  output:
-  - name: image
-    data_type: TYPE_UINT8
-    dims: [-1, -1, 3]
-    labels: false
-inference:
-  wrap_func: text_to_image
-  return_type: ImageOutput
-field_maps:
-  input_fields_map:
-    text: text
-  output_fields_map:
-    image: image
clarifai_utils/models/model_serving/model_config/model_types_config/text-to-text.yaml DELETED

@@ -1,18 +0,0 @@
-triton:
-  input:
-  - name: text
-    data_type: TYPE_STRING
-    dims: [1]
-  output:
-  - name: text
-    data_type: TYPE_STRING
-    dims: [1]
-    labels: false
-inference:
-  wrap_func: text_to_text
-  return_type: TextOutput
-field_maps:
-  input_fields_map:
-    text: text
-  output_fields_map:
-    text: text
clarifai_utils/models/model_serving/model_config/model_types_config/visual-classifier.yaml DELETED

@@ -1,18 +0,0 @@
-triton:
-  input:
-  - name: image
-    data_type: TYPE_UINT8
-    dims: [-1, -1, 3]
-  output:
-  - name: softmax_predictions
-    data_type: TYPE_FP32
-    dims: [-1]
-    labels: true
-inference:
-  wrap_func: visual_classifier
-  return_type: ClassifierOutput
-field_maps:
-  input_fields_map:
-    image: image
-  output_fields_map:
-    concepts: softmax_predictions
clarifai_utils/models/model_serving/model_config/model_types_config/visual-detector.yaml DELETED

@@ -1,28 +0,0 @@
-triton:
-  input:
-  - name: image
-    data_type: TYPE_UINT8
-    dims: [-1, -1, 3]
-  output:
-  - name: predicted_bboxes
-    data_type: TYPE_FP32
-    dims: [-1, 4]
-    labels: false
-  - name: predicted_labels
-    data_type: TYPE_INT32
-    dims: [-1, 1]
-    labels: true
-  - name: predicted_scores
-    data_type: TYPE_FP32
-    dims: [-1, 1]
-    labels: false
-inference:
-  wrap_func: visual_detector
-  return_type: VisualDetectorOutput
-field_maps:
-  input_fields_map:
-    image: image
-  output_fields_map:
-    "regions[...].region_info.bounding_box": "predicted_bboxes"
-    "regions[...].data.concepts[...].id": "predicted_labels"
-    "regions[...].data.concepts[...].value": "predicted_scores"
clarifai_utils/models/model_serving/model_config/model_types_config/visual-embedder.yaml DELETED

@@ -1,18 +0,0 @@
-triton:
-  input:
-  - name: image
-    data_type: TYPE_UINT8
-    dims: [-1, -1, 3]
-  output:
-  - name: embeddings
-    data_type: TYPE_FP32
-    dims: [-1]
-    labels: false
-inference:
-  wrap_func: visual_embedder
-  return_type: EmbeddingOutput
-field_maps:
-  input_fields_map:
-    image: image
-  output_fields_map:
-    embeddings: embeddings
clarifai_utils/models/model_serving/model_config/model_types_config/visual-segmenter.yaml DELETED

@@ -1,18 +0,0 @@
-triton:
-  input:
-  - name: image
-    data_type: TYPE_UINT8
-    dims: [-1, -1, 3]
-  output:
-  - name: predicted_mask
-    data_type: TYPE_INT64
-    dims: [-1, -1]
-    labels: true
-inference:
-  wrap_func: visual_segmenter
-  return_type: MasksOutput
-field_maps:
-  input_fields_map:
-    image: image
-  output_fields_map:
-    "regions[...].region_info.mask,regions[...].data.concepts": "predicted_mask"