clarifai 9.10.2__py3-none-any.whl → 9.10.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clarifai/client/__init__.py +3 -2
- clarifai/client/app.py +39 -23
- clarifai/client/base.py +6 -6
- clarifai/client/dataset.py +113 -55
- clarifai/client/input.py +47 -55
- clarifai/client/model.py +27 -25
- clarifai/client/module.py +13 -11
- clarifai/client/runner.py +5 -3
- clarifai/client/search.py +7 -3
- clarifai/client/user.py +14 -8
- clarifai/client/workflow.py +22 -20
- clarifai/constants/dataset.py +22 -0
- clarifai/datasets/upload/base.py +9 -7
- clarifai/datasets/upload/features.py +3 -3
- clarifai/datasets/upload/image.py +49 -50
- clarifai/datasets/upload/loaders/coco_captions.py +26 -80
- clarifai/datasets/upload/loaders/coco_detection.py +56 -115
- clarifai/datasets/upload/loaders/coco_segmentation.py +69 -137
- clarifai/datasets/upload/loaders/imagenet_classification.py +2 -3
- clarifai/datasets/upload/loaders/xview_detection.py +3 -3
- clarifai/datasets/upload/text.py +16 -16
- clarifai/datasets/upload/utils.py +196 -21
- clarifai/utils/misc.py +21 -0
- clarifai/versions.py +1 -1
- {clarifai-9.10.2.dist-info → clarifai-9.10.3.dist-info}/METADATA +3 -3
- clarifai-9.10.3.dist-info/RECORD +96 -0
- clarifai-9.10.3.dist-info/top_level.txt +1 -0
- clarifai/auth/__init__.py +0 -6
- clarifai/auth/helper.py +0 -367
- clarifai/auth/register.py +0 -23
- clarifai/auth/stub.py +0 -127
- clarifai/datasets/upload/examples/README.md +0 -31
- clarifai/datasets/upload/examples/image_classification/__init__.py +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/__init__.py +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/cifar_small_test.csv +0 -10
- clarifai/datasets/upload/examples/image_classification/cifar10/cifar_small_train.csv +0 -10
- clarifai/datasets/upload/examples/image_classification/cifar10/dataset.py +0 -46
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_700.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_701.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_702.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_703.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_704.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_705.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_706.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_707.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_708.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_709.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/__init__.py +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/dataset.py +0 -39
- clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/1420783.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/3287885.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/3617075.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/38052.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/39147.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/139558.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/1636096.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/2480925.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/3385808.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/3647386.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/1826869.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/2243245.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/259212.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/2842688.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/3035414.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/1545393.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/2427642.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/3520891.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/377566.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/503504.jpg +0 -0
- clarifai/datasets/upload/examples/text_classification/__init__.py +0 -0
- clarifai/datasets/upload/examples/text_classification/imdb_dataset/__init__.py +0 -0
- clarifai/datasets/upload/examples/text_classification/imdb_dataset/dataset.py +0 -42
- clarifai/datasets/upload/examples/text_classification/imdb_dataset/test.csv +0 -201
- clarifai/datasets/upload/examples/text_classification/imdb_dataset/train.csv +0 -201
- clarifai/datasets/upload/loaders/README.md +0 -49
- clarifai/models/model_serving/README.md +0 -155
- clarifai/models/model_serving/docs/custom_config.md +0 -33
- clarifai/models/model_serving/docs/dependencies.md +0 -11
- clarifai/models/model_serving/docs/inference_parameters.md +0 -134
- clarifai/models/model_serving/docs/model_types.md +0 -20
- clarifai/models/model_serving/docs/output.md +0 -28
- clarifai/models/model_serving/examples/README.md +0 -7
- clarifai/models/model_serving/examples/image_classification/README.md +0 -9
- clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/README.md +0 -11
- clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/config.json +0 -42
- clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/preprocessor_config.json +0 -15
- clarifai/models/model_serving/examples/image_classification/age_vit/config.pbtxt +0 -23
- clarifai/models/model_serving/examples/image_classification/age_vit/labels.txt +0 -9
- clarifai/models/model_serving/examples/image_classification/age_vit/requirements.txt +0 -7
- clarifai/models/model_serving/examples/text_classification/README.md +0 -9
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/README.md +0 -12
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/config.json +0 -34
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/special_tokens_map.json +0 -1
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/config.pbtxt +0 -21
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/labels.txt +0 -3
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +0 -7
- clarifai/models/model_serving/examples/text_embedding/README.md +0 -9
- clarifai/models/model_serving/examples/text_to_image/README.md +0 -9
- clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/__init__.py +0 -0
- clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/inference.py +0 -52
- clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/model.py +0 -60
- clarifai/models/model_serving/examples/text_to_image/sd-v1.5/config.pbtxt +0 -22
- clarifai/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +0 -6
- clarifai/models/model_serving/examples/text_to_text/README.md +0 -10
- clarifai/models/model_serving/examples/text_to_text/bart-summarize/config.pbtxt +0 -20
- clarifai/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +0 -4
- clarifai/models/model_serving/examples/visual_detection/README.md +0 -11
- clarifai/models/model_serving/examples/visual_detection/yolov5x/config.pbtxt +0 -36
- clarifai/models/model_serving/examples/visual_detection/yolov5x/labels.txt +0 -80
- clarifai/models/model_serving/examples/visual_detection/yolov5x/requirements.txt +0 -12
- clarifai/models/model_serving/examples/visual_embedding/README.md +0 -9
- clarifai/models/model_serving/examples/visual_embedding/vit-base/config.pbtxt +0 -22
- clarifai/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +0 -5
- clarifai/models/model_serving/examples/visual_segmentation/README.md +0 -9
- clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/config.pbtxt +0 -24
- clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/labels.txt +0 -18
- clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +0 -5
- clarifai/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +0 -24
- clarifai/models/model_serving/model_config/model_types_config/text-classifier.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/text-embedder.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml +0 -28
- clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +0 -18
- clarifai/modules/README.md +0 -5
- clarifai/modules/style.css +0 -217
- clarifai-9.10.2.dist-info/RECORD +0 -386
- clarifai-9.10.2.dist-info/top_level.txt +0 -2
- clarifai_utils/__init__.py +0 -0
- clarifai_utils/auth/__init__.py +0 -6
- clarifai_utils/auth/helper.py +0 -367
- clarifai_utils/auth/register.py +0 -23
- clarifai_utils/auth/stub.py +0 -127
- clarifai_utils/cli.py +0 -0
- clarifai_utils/client/__init__.py +0 -16
- clarifai_utils/client/app.py +0 -684
- clarifai_utils/client/auth/__init__.py +0 -4
- clarifai_utils/client/auth/helper.py +0 -367
- clarifai_utils/client/auth/register.py +0 -23
- clarifai_utils/client/auth/stub.py +0 -127
- clarifai_utils/client/base.py +0 -131
- clarifai_utils/client/dataset.py +0 -442
- clarifai_utils/client/input.py +0 -892
- clarifai_utils/client/lister.py +0 -54
- clarifai_utils/client/model.py +0 -575
- clarifai_utils/client/module.py +0 -94
- clarifai_utils/client/runner.py +0 -161
- clarifai_utils/client/search.py +0 -254
- clarifai_utils/client/user.py +0 -253
- clarifai_utils/client/workflow.py +0 -223
- clarifai_utils/constants/model.py +0 -4
- clarifai_utils/constants/search.py +0 -2
- clarifai_utils/datasets/__init__.py +0 -0
- clarifai_utils/datasets/export/__init__.py +0 -0
- clarifai_utils/datasets/export/inputs_annotations.py +0 -222
- clarifai_utils/datasets/upload/__init__.py +0 -0
- clarifai_utils/datasets/upload/base.py +0 -66
- clarifai_utils/datasets/upload/examples/README.md +0 -31
- clarifai_utils/datasets/upload/examples/image_classification/__init__.py +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/__init__.py +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/cifar_small_test.csv +0 -10
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/cifar_small_train.csv +0 -10
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/dataset.py +0 -46
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_700.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_701.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_702.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_703.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_704.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_705.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_706.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_707.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_708.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_709.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/__init__.py +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/dataset.py +0 -39
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/1420783.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/3287885.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/3617075.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/38052.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/39147.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/139558.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/1636096.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/2480925.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/3385808.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/3647386.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/1826869.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/2243245.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/259212.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/2842688.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/3035414.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/1545393.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/2427642.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/3520891.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/377566.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/503504.jpg +0 -0
- clarifai_utils/datasets/upload/examples/text_classification/__init__.py +0 -0
- clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/__init__.py +0 -0
- clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/dataset.py +0 -42
- clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/test.csv +0 -201
- clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/train.csv +0 -201
- clarifai_utils/datasets/upload/features.py +0 -44
- clarifai_utils/datasets/upload/image.py +0 -165
- clarifai_utils/datasets/upload/loaders/README.md +0 -49
- clarifai_utils/datasets/upload/loaders/__init__.py +0 -0
- clarifai_utils/datasets/upload/loaders/coco_captions.py +0 -103
- clarifai_utils/datasets/upload/loaders/coco_detection.py +0 -134
- clarifai_utils/datasets/upload/loaders/coco_segmentation.py +0 -166
- clarifai_utils/datasets/upload/loaders/imagenet_classification.py +0 -59
- clarifai_utils/datasets/upload/loaders/xview_detection.py +0 -148
- clarifai_utils/datasets/upload/text.py +0 -53
- clarifai_utils/datasets/upload/utils.py +0 -63
- clarifai_utils/errors.py +0 -89
- clarifai_utils/models/__init__.py +0 -0
- clarifai_utils/models/api.py +0 -283
- clarifai_utils/models/model_serving/README.md +0 -155
- clarifai_utils/models/model_serving/__init__.py +0 -12
- clarifai_utils/models/model_serving/cli/__init__.py +0 -12
- clarifai_utils/models/model_serving/cli/deploy_cli.py +0 -123
- clarifai_utils/models/model_serving/cli/model_zip.py +0 -61
- clarifai_utils/models/model_serving/cli/repository.py +0 -87
- clarifai_utils/models/model_serving/constants.py +0 -1
- clarifai_utils/models/model_serving/docs/custom_config.md +0 -33
- clarifai_utils/models/model_serving/docs/dependencies.md +0 -11
- clarifai_utils/models/model_serving/docs/inference_parameters.md +0 -134
- clarifai_utils/models/model_serving/docs/model_types.md +0 -20
- clarifai_utils/models/model_serving/docs/output.md +0 -28
- clarifai_utils/models/model_serving/examples/README.md +0 -7
- clarifai_utils/models/model_serving/examples/image_classification/README.md +0 -9
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/inference.py +0 -56
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/model.py +0 -61
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/README.md +0 -11
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/config.json +0 -42
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/preprocessor_config.json +0 -15
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/config.pbtxt +0 -23
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/labels.txt +0 -9
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/requirements.txt +0 -7
- clarifai_utils/models/model_serving/examples/text_classification/README.md +0 -9
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/inference.py +0 -55
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/model.py +0 -61
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/README.md +0 -12
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/config.json +0 -34
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/special_tokens_map.json +0 -1
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/config.pbtxt +0 -21
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/labels.txt +0 -3
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +0 -7
- clarifai_utils/models/model_serving/examples/text_embedding/README.md +0 -9
- clarifai_utils/models/model_serving/examples/text_to_image/README.md +0 -9
- clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/inference.py +0 -52
- clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/model.py +0 -60
- clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/config.pbtxt +0 -22
- clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +0 -6
- clarifai_utils/models/model_serving/examples/text_to_text/README.md +0 -10
- clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/inference.py +0 -47
- clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/model.py +0 -60
- clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/config.pbtxt +0 -20
- clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +0 -4
- clarifai_utils/models/model_serving/examples/visual_detection/README.md +0 -11
- clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/1/inference.py +0 -72
- clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/1/model.py +0 -61
- clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/config.pbtxt +0 -36
- clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/labels.txt +0 -80
- clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/requirements.txt +0 -12
- clarifai_utils/models/model_serving/examples/visual_embedding/README.md +0 -9
- clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/inference.py +0 -51
- clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/model.py +0 -60
- clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/config.pbtxt +0 -22
- clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +0 -5
- clarifai_utils/models/model_serving/examples/visual_segmentation/README.md +0 -9
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/inference.py +0 -55
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/model.py +0 -60
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/config.pbtxt +0 -24
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/labels.txt +0 -18
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +0 -5
- clarifai_utils/models/model_serving/model_config/__init__.py +0 -14
- clarifai_utils/models/model_serving/model_config/config.py +0 -302
- clarifai_utils/models/model_serving/model_config/inference_parameter.py +0 -124
- clarifai_utils/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +0 -24
- clarifai_utils/models/model_serving/model_config/model_types_config/text-classifier.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/text-embedder.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/text-to-image.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/text-to-text.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/visual-classifier.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/visual-detector.yaml +0 -28
- clarifai_utils/models/model_serving/model_config/model_types_config/visual-embedder.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/serializer.py +0 -134
- clarifai_utils/models/model_serving/models/__init__.py +0 -12
- clarifai_utils/models/model_serving/models/default_test.py +0 -275
- clarifai_utils/models/model_serving/models/inference.py +0 -42
- clarifai_utils/models/model_serving/models/model_types.py +0 -265
- clarifai_utils/models/model_serving/models/output.py +0 -124
- clarifai_utils/models/model_serving/models/pb_model.py +0 -74
- clarifai_utils/models/model_serving/models/test.py +0 -64
- clarifai_utils/models/model_serving/pb_model_repository.py +0 -101
- clarifai_utils/modules/README.md +0 -5
- clarifai_utils/modules/__init__.py +0 -0
- clarifai_utils/modules/css.py +0 -60
- clarifai_utils/modules/pages.py +0 -42
- clarifai_utils/modules/style.css +0 -217
- clarifai_utils/runners/__init__.py +0 -0
- clarifai_utils/runners/example.py +0 -33
- clarifai_utils/schema/search.py +0 -69
- clarifai_utils/urls/helper.py +0 -103
- clarifai_utils/utils/__init__.py +0 -0
- clarifai_utils/utils/logging.py +0 -90
- clarifai_utils/utils/misc.py +0 -33
- clarifai_utils/utils/model_train.py +0 -157
- clarifai_utils/versions.py +0 -6
- clarifai_utils/workflows/__init__.py +0 -0
- clarifai_utils/workflows/export.py +0 -68
- clarifai_utils/workflows/utils.py +0 -59
- clarifai_utils/workflows/validate.py +0 -67
- {clarifai-9.10.2.dist-info → clarifai-9.10.3.dist-info}/LICENSE +0 -0
- {clarifai-9.10.2.dist-info → clarifai-9.10.3.dist-info}/WHEEL +0 -0
- {clarifai-9.10.2.dist-info → clarifai-9.10.3.dist-info}/entry_points.txt +0 -0
|
@@ -1,87 +0,0 @@
|
|
|
1
|
-
# Copyright 2023 Clarifai, Inc.
|
|
2
|
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
3
|
-
# you may not use this file except in compliance with the License.
|
|
4
|
-
# You may obtain a copy of the License at
|
|
5
|
-
#
|
|
6
|
-
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
|
-
#
|
|
8
|
-
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
10
|
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
|
-
# See the License for the specific language governing permissions and
|
|
12
|
-
# limitations under the License.
|
|
13
|
-
"""Triton model repository generation commandline interface."""
|
|
14
|
-
|
|
15
|
-
import argparse
|
|
16
|
-
|
|
17
|
-
from ..constants import MAX_HW_DIM
|
|
18
|
-
from ..model_config import MODEL_TYPES, get_model_config
|
|
19
|
-
from ..pb_model_repository import TritonModelRepository
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
def dims_type(shape_string: str):
|
|
23
|
-
"""Read list string from cli and convert values to a list of integers."""
|
|
24
|
-
shape_string = shape_string.replace("[", "").replace("]", "")
|
|
25
|
-
shapes = list(map(int, shape_string.split(",")))
|
|
26
|
-
return shapes
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
def model_upload_init():
|
|
30
|
-
"""
|
|
31
|
-
Clarifai triton model upload commandline tool.
|
|
32
|
-
"""
|
|
33
|
-
parser = argparse.ArgumentParser(description=__doc__)
|
|
34
|
-
# TritonModelConfig args
|
|
35
|
-
parser.add_argument("--model_name", type=str, required=True, help="Inference Model Name")
|
|
36
|
-
parser.add_argument(
|
|
37
|
-
"--model_version",
|
|
38
|
-
type=str,
|
|
39
|
-
default="1",
|
|
40
|
-
required=False,
|
|
41
|
-
help="Triton inference model version name. 1 stands for version 1. \
|
|
42
|
-
Leave as default value (Recommended).")
|
|
43
|
-
parser.add_argument(
|
|
44
|
-
"--model_type",
|
|
45
|
-
type=str,
|
|
46
|
-
choices=MODEL_TYPES,
|
|
47
|
-
required=True,
|
|
48
|
-
help=f"Clarifai supported model types.\n Model-types-map: {MODEL_TYPES}",
|
|
49
|
-
)
|
|
50
|
-
parser.add_argument(
|
|
51
|
-
"--image_shape",
|
|
52
|
-
type=dims_type,
|
|
53
|
-
default="[-1, -1]",
|
|
54
|
-
required=False,
|
|
55
|
-
help="(H, W) dims for models with an image input type. H and W each have a max value of 1024",
|
|
56
|
-
)
|
|
57
|
-
parser.add_argument(
|
|
58
|
-
"--repo_dir",
|
|
59
|
-
type=str,
|
|
60
|
-
default=".",
|
|
61
|
-
required=True,
|
|
62
|
-
help="Directory to create triton repository.")
|
|
63
|
-
|
|
64
|
-
args = parser.parse_args()
|
|
65
|
-
|
|
66
|
-
if len(args.image_shape) != 2:
|
|
67
|
-
raise ValueError(
|
|
68
|
-
f"image_shape takes 2 values, Height and Width. Got {len(args.image_shape)} values instead."
|
|
69
|
-
)
|
|
70
|
-
|
|
71
|
-
if args.image_shape[0] > MAX_HW_DIM or args.image_shape[1] > MAX_HW_DIM:
|
|
72
|
-
raise ValueError(
|
|
73
|
-
f"H and W each have a maximum value of 1024. Got H: {args.image_shape[0]}, W: {args.image_shape[1]}"
|
|
74
|
-
)
|
|
75
|
-
|
|
76
|
-
model_config = get_model_config(args.model_type).make_triton_model_config(
|
|
77
|
-
model_name=args.model_name,
|
|
78
|
-
model_version="1",
|
|
79
|
-
image_shape=args.image_shape,
|
|
80
|
-
)
|
|
81
|
-
|
|
82
|
-
triton_repo = TritonModelRepository(model_config)
|
|
83
|
-
triton_repo.build_repository(args.repo_dir)
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
if __name__ == "__main__":
|
|
87
|
-
model_upload_init()
|
|
@@ -1 +0,0 @@
|
|
|
1
|
-
MAX_HW_DIM = 1024
|
|
@@ -1,33 +0,0 @@
|
|
|
1
|
-
## Custom Triton Configurations
|
|
2
|
-
|
|
3
|
-
The commandline triton model repository generation utils do work with default values for the various triton configurations but a few of these config values can be modified to suit different task specific needs.
|
|
4
|
-
|
|
5
|
-
* For vision models for instance, different input shapes for the `Height (H)` and `Width (W)` are supported and can be set via the commandline too.i.e.
|
|
6
|
-
```console
|
|
7
|
-
$ clarifai-model-upload-init --model_name <Your model name> \
|
|
8
|
-
--model_type <select model type from available ones> \
|
|
9
|
-
--image_shape "H, W"
|
|
10
|
-
--repo_dir <directory in which to create your model repository>
|
|
11
|
-
```
|
|
12
|
-
`H` and `W` each have a maximum value of 1024.
|
|
13
|
-
`--image_shape` accepts both `"H, W"` and `"[H, W]"` format input.
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
## Generating the triton model repository without the commandline
|
|
17
|
-
|
|
18
|
-
The triton model repository can be generated via a python script specifying the same values as required in the commandline. Below is a sample of how the code would be structured with `visual_classifier`.
|
|
19
|
-
|
|
20
|
-
```python
|
|
21
|
-
from clarifai.models.model_serving.model_config import get_model_config, ModelTypes, TritonModelConfig
|
|
22
|
-
from clarifai.models.model_serving.pb_model_repository import TritonModelRepository
|
|
23
|
-
|
|
24
|
-
model_type = ModelTypes.visual_classifier
|
|
25
|
-
model_config: TritonModelConfig = get_model_config(model_type).make_triton_model_config(
|
|
26
|
-
model_name="<model_name>",
|
|
27
|
-
model_version="1",
|
|
28
|
-
image_shape=<[H,W]>, # 0 < [H,W] <= 1024
|
|
29
|
-
)
|
|
30
|
-
|
|
31
|
-
triton_repo = TritonModelRepository(model_config)
|
|
32
|
-
triton_repo.build_repository("<dir>")
|
|
33
|
-
```
|
|
@@ -1,11 +0,0 @@
|
|
|
1
|
-
## Inference Execution Environments
|
|
2
|
-
|
|
3
|
-
Each model built for inference with triton requires certain dependencies & dependency versions be installed for successful inference execution.
|
|
4
|
-
An execution environment is created for each model to be deployed on Clarifai and all necessary dependencies as listed in the `requirements.txt` file are installed there.
|
|
5
|
-
|
|
6
|
-
## Supported python and torch versions
|
|
7
|
-
|
|
8
|
-
Currently, models must use python 3.8 (any 3.8.x). Supported torch versions are 1.13.1 and 2.0.1.
|
|
9
|
-
If your model depends on torch, torch must be listed in your requirements.txt file (even if it is
|
|
10
|
-
already a dependency of another package). An appropriate supported torch version will be selected
|
|
11
|
-
based on your requirements.txt.
|
|
@@ -1,134 +0,0 @@
|
|
|
1
|
-
## Inference parameters
|
|
2
|
-
|
|
3
|
-
When making predictions, you may need to change some parameters to adjust the result. Those parameters will be passed through `parameters()` of a request in the triton python model.
|
|
4
|
-
|
|
5
|
-
In order to send it to `**kwargs` of `get_predictions` in `inference.py`, you can define some parameters and they will be visible and adjustable on Clarifai model view.
|
|
6
|
-
|
|
7
|
-
This document helps you to create inference parameters that are easily visible and adjustable on the Clarifai platform. The defined parameters will be sent as a `json` file when you use the `clarifai-upload-model` cli.
|
|
8
|
-
|
|
9
|
-
### JSON file structure:
|
|
10
|
-
The file contains a list of objects, each having 4 fields:
|
|
11
|
-
* `path` (str): name of your parameter, it must be valid as python variable
|
|
12
|
-
* `field_type` (int): the parameter data type is one of {1,2,21,3}, it means {boolean, string, encrypted_string, number} respectively. `Number` means `int` or `float`. `Encrypted_string` is a string that can be used to store your secrets, like an API key. The API will not return the values for this as plaintext.
|
|
13
|
-
* `default_value`: a default value of the parameter.
|
|
14
|
-
* `description` (str): short sentence describes what the parameter does
|
|
15
|
-
|
|
16
|
-
An example of 4 parameters:
|
|
17
|
-
```json
|
|
18
|
-
[
|
|
19
|
-
{
|
|
20
|
-
"path": "boolean_var",
|
|
21
|
-
"field_type": 1,
|
|
22
|
-
"default_value": true,
|
|
23
|
-
"description": "a boolean variable"
|
|
24
|
-
},
|
|
25
|
-
{
|
|
26
|
-
"path": "string_var",
|
|
27
|
-
"field_type": 2,
|
|
28
|
-
"default_value": "string_1",
|
|
29
|
-
"description": "a string variable"
|
|
30
|
-
},
|
|
31
|
-
{
|
|
32
|
-
"path": "number_var",
|
|
33
|
-
"field_type": 3,
|
|
34
|
-
"default_value": 9.9,
|
|
35
|
-
"description": "a float number variable"
|
|
36
|
-
},
|
|
37
|
-
{
|
|
38
|
-
"path": "secret_string_var",
|
|
39
|
-
"field_type": 21,
|
|
40
|
-
"default_value": "API_KEY",
|
|
41
|
-
"description": "a string variable contains secret like API key"
|
|
42
|
-
}
|
|
43
|
-
]
|
|
44
|
-
```
|
|
45
|
-
|
|
46
|
-
### Generate JSON file
|
|
47
|
-
1. Manually create the file based on above structure
|
|
48
|
-
2. By code:
|
|
49
|
-
|
|
50
|
-
#### 2.1. Fully setup
|
|
51
|
-
```python
|
|
52
|
-
from clarifai.models.model_serving.model_config.inference_parameter import InferParamManager, InferParam, InferParamType
|
|
53
|
-
|
|
54
|
-
params = [
|
|
55
|
-
InferParam(
|
|
56
|
-
path="boolean_var",
|
|
57
|
-
field_type=InferParamType.BOOL,
|
|
58
|
-
default_value=True,
|
|
59
|
-
description="a boolean varaiabe"
|
|
60
|
-
),
|
|
61
|
-
InferParam(
|
|
62
|
-
path="string_var",
|
|
63
|
-
field_type=InferParamType.STRING,
|
|
64
|
-
default_value="string_1",
|
|
65
|
-
description="a string variable"
|
|
66
|
-
),
|
|
67
|
-
InferParam(
|
|
68
|
-
path="number_var",
|
|
69
|
-
field_type=InferParamType.NUMBER,
|
|
70
|
-
default_value=9.9,
|
|
71
|
-
description="a float number variable"
|
|
72
|
-
),
|
|
73
|
-
InferParam(
|
|
74
|
-
path="secret_string_var",
|
|
75
|
-
field_type=InferParamType.ENCRYPTED_STRING,
|
|
76
|
-
default_value="API_KEY",
|
|
77
|
-
description="a string variable contains secret like API key"
|
|
78
|
-
),
|
|
79
|
-
]
|
|
80
|
-
|
|
81
|
-
ipm = InferParamManager(params=params)
|
|
82
|
-
ipm.export("your_file.json")
|
|
83
|
-
```
|
|
84
|
-
|
|
85
|
-
##### 2.2. Shorten
|
|
86
|
-
`NOTE`: in this way `description` field will be set as empty aka "".
|
|
87
|
-
*You need to modify* `description` in order to be able to upload the settings to Clarifai.
|
|
88
|
-
|
|
89
|
-
`NOTE`: in this way `ENCRYPTED_STRING` type must be defined with "_" prefix
|
|
90
|
-
|
|
91
|
-
```python
|
|
92
|
-
params = dict(boolean_var=True, string_var="string_1", number_var=9.9, _secret_string_var="YOUR_KEY")
|
|
93
|
-
ipm = InferParamManager.from_kwargs(**params)
|
|
94
|
-
ipm.export("your_file.json")
|
|
95
|
-
|
|
96
|
-
```
|
|
97
|
-
|
|
98
|
-
3. In `test.py`. You can define your parameters like `2.2. Shorten` in the `inference_parameters` attribute of `CustomTestInferenceModel`; the file will be generated when you run the test. Keep in mind to change `description`
|
|
99
|
-
|
|
100
|
-
### Usage
|
|
101
|
-
Your defined parameters will be passed through `kwargs` of `InferenceModel.get_predictions` method
|
|
102
|
-
in `inference.py`
|
|
103
|
-
```python
|
|
104
|
-
class InferenceModel:
|
|
105
|
-
def __init__():
|
|
106
|
-
# initialization
|
|
107
|
-
self.model = YourModel()
|
|
108
|
-
|
|
109
|
-
@some_wrapper_function
|
|
110
|
-
def get_predictions(self, input_data, **kwargs):
|
|
111
|
-
# `kwargs` contains your inference parameters
|
|
112
|
-
|
|
113
|
-
# get a value from kwargs
|
|
114
|
-
number_var = kwargs.get("number_var", 9.9)
|
|
115
|
-
|
|
116
|
-
# pass everything to a function
|
|
117
|
-
output = self.model.predict(input_data, **kwargs)
|
|
118
|
-
|
|
119
|
-
return SomeOutputType(output)
|
|
120
|
-
|
|
121
|
-
```
|
|
122
|
-
|
|
123
|
-
in `test.py`
|
|
124
|
-
```python
|
|
125
|
-
class CustomTestInferenceModel:
|
|
126
|
-
inference_parameters = "" # input a path of json file from `2.1` or a dict from `2.2`
|
|
127
|
-
|
|
128
|
-
...
|
|
129
|
-
|
|
130
|
-
def test_something(self):
|
|
131
|
-
input = ...
|
|
132
|
-
output = self.triton_get_predictions(input, number_var=1, string_var="test", _secret="KEY")
|
|
133
|
-
self.assert(...)
|
|
134
|
-
```
|
|
@@ -1,20 +0,0 @@
|
|
|
1
|
-
## Clarifai Model Types
|
|
2
|
-
|
|
3
|
-
Models on the clarifai platform are deployed using the [Triton Inference Server Python Backend](https://github.com/triton-inference-server/python_backend) to allow for pre and post processing of data to and from the model.
|
|
4
|
-
|
|
5
|
-
Inputs into the models are passed as numpy arrays and the predictions are similarly returned as numpy arrays.
|
|
6
|
-
The predictions from user defined models in the [inference script](../README.md#the-inference-script) file have to match certain formats and shapes for the models to be upload compatible.
|
|
7
|
-
|
|
8
|
-
Clarifai [model types](../models/model_types.py) are decorator functions that are responsible for passing input batch requests to user defined inference models to get predictions and format the resultant predictions into Triton Inference responses that are sent by the server for each client inference request.
|
|
9
|
-
|
|
10
|
-
## Supported Model Types Wrapper Functions:
|
|
11
|
-
|
|
12
|
-
- visual_detector
|
|
13
|
-
- visual_classifier
|
|
14
|
-
- text_classifier
|
|
15
|
-
- text_to_text
|
|
16
|
-
- text_embedder
|
|
17
|
-
- text_to_image
|
|
18
|
-
- visual_embedder
|
|
19
|
-
- visual_segmenter
|
|
20
|
-
- multimodal_embedder
|
|
@@ -1,28 +0,0 @@
|
|
|
1
|
-
## Clarifai Model Prediction Output Formats.
|
|
2
|
-
|
|
3
|
-
Different models return different types of predictions and Clarifai output dataclasses aim at standardizing the output formats per model type for compatibility with the Clarifai API.
|
|
4
|
-
|
|
5
|
-
Each machine learning modality supported by the Clarifai API has a predefined dataclass output format with all attributes being of numpy ndarray type.
|
|
6
|
-
|
|
7
|
-
## Supported Formats
|
|
8
|
-
|
|
9
|
-
Usage:
|
|
10
|
-
```python
|
|
11
|
-
from clarifai.models.model_serving.models.output import VisualDetectorOutput
|
|
12
|
-
```
|
|
13
|
-
| Output Type (dataclass) | Attributes | Attribute Data Type| Attribute Shapes | Description |
|
|
14
|
-
| --- | --- | --- | --- | --- |
|
|
15
|
-
| [VisualDetectorOutput](../models/output.py) | `predicted_bboxes` | float32 | [-1, 4] | A 2D detected bounding boxes array of any length with each element array having a length of exactly 4. All bbox coordinates MUST be normalized between 0 & 1. |
|
|
16
|
-
| | `predicted_labels` | int32 | [-1, 1] | A 2D detected labels array of length equal to that of predicted_bboxes with each element array having a length of exactly 1.
|
|
17
|
-
| | `predicted_scores` | float32 | [-1, 1] | A 2D detection scores array of length equal to that of predicted_bboxes & predicted_labels with each element array having a length of exactly 1.
|
|
18
|
-
| | | | | |
|
|
19
|
-
| [ClassifierOutput](../models/output.py) | `predicted_scores` | float32 | [-1] | The softmax of the model's predictions. The index of each predicted probability as returned by the model must correspond to the label index in the labels.txt file |
|
|
20
|
-
| | | | | |
|
|
21
|
-
| [TextOutput](../models/output.py) | `predicted_text` | string | [1] | Predicted text from a model |
|
|
22
|
-
| | | | | |
|
|
23
|
-
| [EmbeddingOutput](../models/output.py) | `embedding_vector` | float32 | [-1] | The embedding vector (image or text embedding) returned by a model |
|
|
24
|
-
| | | | | |
|
|
25
|
-
| [MasksOutput](../models/output.py) | `predicted_mask` | int64 | [-1, -1] | The model predicted image mask. The predicted class indices must be assigned to the corresponding image pixels in the mask where that class is predicted by the model. |
|
|
26
|
-
| | | | | |
|
|
27
|
-
| [ImageOutput](../models/output.py) | `image` | uint8 | [-1, -1, 3] | The model predicted/generated image |
|
|
28
|
-
| | | | | |
|
|
@@ -1,7 +0,0 @@
|
|
|
1
|
-
## Clarifai Model Upload Examples
|
|
2
|
-
|
|
3
|
-
A collection of pre-built triton models for different tasks.
|
|
4
|
-
To run inference locally using any of the examples here, you need to have the [Triton Inference Server](https://github.com/triton-inference-server/server/blob/main/docs/customization_guide/build.md#building-with-docker) installed.
|
|
5
|
-
|
|
6
|
-
Additionally some models may require other files such as checkpoints be downloaded before testing and/or deployment to Clarifai as they are omitted here due to github file size limits.
|
|
7
|
-
See the Readme files under each model to see if there are any additional files required and where to place them.
|
|
@@ -1,9 +0,0 @@
|
|
|
1
|
-
## Image Classification Triton Model Examples
|
|
2
|
-
|
|
3
|
-
These can be used on the fly with minimal or no changes to test deploy image classification models to the Clarifai platform. See the required files section for each model below.
|
|
4
|
-
|
|
5
|
-
* ### [VIT Age Classifier](./age_vit/)
|
|
6
|
-
|
|
7
|
-
Required files to run tests locally:
|
|
8
|
-
|
|
9
|
-
* Download the [model checkpoint from huggingface](https://huggingface.co/nateraw/vit-age-classifier/tree/main) and store it under `age_vit/1/vit-age-classifier/`
|
|
File without changes
|
|
@@ -1,56 +0,0 @@
|
|
|
1
|
-
# This file contains boilerplate code to allow users write their model
|
|
2
|
-
# inference code that will then interact with the Triton Inference Server
|
|
3
|
-
# Python backend to serve end user requests.
|
|
4
|
-
# The module name, module path, class name & get_predictions() method names MUST be maintained as is
|
|
5
|
-
# but other methods may be added within the class as deemed fit provided
|
|
6
|
-
# they are invoked within the main get_predictions() inference method
|
|
7
|
-
# if they play a role in any step of model inference
|
|
8
|
-
"""User model inference script."""
|
|
9
|
-
|
|
10
|
-
import os
|
|
11
|
-
from pathlib import Path
|
|
12
|
-
from typing import Callable
|
|
13
|
-
|
|
14
|
-
import torch
|
|
15
|
-
from scipy.special import softmax
|
|
16
|
-
from transformers import ViTFeatureExtractor, ViTForImageClassification
|
|
17
|
-
|
|
18
|
-
from clarifai.models.model_serving.models.model_types import visual_classifier
|
|
19
|
-
from clarifai.models.model_serving.models.output import ClassifierOutput
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
class InferenceModel:
|
|
23
|
-
"""User model inference class."""
|
|
24
|
-
|
|
25
|
-
def __init__(self) -> None:
|
|
26
|
-
"""
|
|
27
|
-
Load inference time artifacts that are called frequently, e.g. models, tokenizers, etc.
|
|
28
|
-
in this method so they are loaded only once for faster inference.
|
|
29
|
-
"""
|
|
30
|
-
self.base_path: Path = os.path.dirname(__file__)
|
|
31
|
-
self.huggingface_model_path: Path = os.path.join(self.base_path, "vit-age-classifier")
|
|
32
|
-
self.transforms = ViTFeatureExtractor.from_pretrained(self.huggingface_model_path)
|
|
33
|
-
self.model: Callable = ViTForImageClassification.from_pretrained(self.huggingface_model_path)
|
|
34
|
-
self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
|
|
35
|
-
|
|
36
|
-
@visual_classifier
|
|
37
|
-
def get_predictions(self, input_data) -> ClassifierOutput:
|
|
38
|
-
"""
|
|
39
|
-
Main model inference method.
|
|
40
|
-
|
|
41
|
-
Args:
|
|
42
|
-
-----
|
|
43
|
-
input_data: A single input data item to predict on.
|
|
44
|
-
Input data can be an image or text, etc depending on the model type.
|
|
45
|
-
|
|
46
|
-
Returns:
|
|
47
|
-
--------
|
|
48
|
-
One of the clarifai.models.model_serving.models.output types. Refer to the README/docs
|
|
49
|
-
"""
|
|
50
|
-
# Transform image and pass it to the model
|
|
51
|
-
inputs = self.transforms(input_data, return_tensors='pt')
|
|
52
|
-
output = self.model(**inputs)
|
|
53
|
-
pred_scores = softmax(
|
|
54
|
-
output[0][0].detach().numpy()) # alt: softmax(output.logits[0].detach().numpy())
|
|
55
|
-
|
|
56
|
-
return ClassifierOutput(predicted_scores=pred_scores)
|
|
@@ -1,61 +0,0 @@
|
|
|
1
|
-
# Copyright 2023 Clarifai, Inc.
|
|
2
|
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
3
|
-
# you may not use this file except in compliance with the License.
|
|
4
|
-
# You may obtain a copy of the License at
|
|
5
|
-
#
|
|
6
|
-
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
|
-
#
|
|
8
|
-
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
10
|
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
|
-
# See the License for the specific language governing permissions and
|
|
12
|
-
# limitations under the License.
|
|
13
|
-
"""Triton inference server Python Backend Model."""
|
|
14
|
-
|
|
15
|
-
import os
|
|
16
|
-
import sys
|
|
17
|
-
|
|
18
|
-
try:
|
|
19
|
-
import triton_python_backend_utils as pb_utils
|
|
20
|
-
except ModuleNotFoundError:
|
|
21
|
-
pass
|
|
22
|
-
from google.protobuf import text_format
|
|
23
|
-
from tritonclient.grpc.model_config_pb2 import ModelConfig
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
class TritonPythonModel:
|
|
27
|
-
"""
|
|
28
|
-
Triton Python BE Model.
|
|
29
|
-
"""
|
|
30
|
-
|
|
31
|
-
def initialize(self, args):
|
|
32
|
-
"""
|
|
33
|
-
Triton server init.
|
|
34
|
-
"""
|
|
35
|
-
args["model_repository"] = args["model_repository"].replace("/1/model.py", "")
|
|
36
|
-
sys.path.append(os.path.dirname(__file__))
|
|
37
|
-
from inference import InferenceModel
|
|
38
|
-
|
|
39
|
-
self.inference_obj = InferenceModel()
|
|
40
|
-
self.device = "cuda:0" if "GPU" in args["model_instance_kind"] else "cpu"
|
|
41
|
-
|
|
42
|
-
# Read input_name from config file
|
|
43
|
-
self.config_msg = ModelConfig()
|
|
44
|
-
with open(os.path.join(args["model_repository"], "config.pbtxt"), "r") as f:
|
|
45
|
-
cfg = f.read()
|
|
46
|
-
text_format.Merge(cfg, self.config_msg)
|
|
47
|
-
self.input_name = [inp.name for inp in self.config_msg.input][0]
|
|
48
|
-
|
|
49
|
-
def execute(self, requests):
|
|
50
|
-
"""
|
|
51
|
-
Serve model inference requests.
|
|
52
|
-
"""
|
|
53
|
-
responses = []
|
|
54
|
-
|
|
55
|
-
for request in requests:
|
|
56
|
-
in_batch = pb_utils.get_input_tensor_by_name(request, self.input_name)
|
|
57
|
-
in_batch = in_batch.as_numpy()
|
|
58
|
-
inference_response = self.inference_obj.get_predictions(in_batch)
|
|
59
|
-
responses.append(inference_response)
|
|
60
|
-
|
|
61
|
-
return responses
|
|
@@ -1,42 +0,0 @@
|
|
|
1
|
-
{
|
|
2
|
-
"_name_or_path": "google/vit-base-patch16-224-in21k",
|
|
3
|
-
"architectures": [
|
|
4
|
-
"ViTForImageClassification"
|
|
5
|
-
],
|
|
6
|
-
"attention_probs_dropout_prob": 0.0,
|
|
7
|
-
"hidden_act": "gelu",
|
|
8
|
-
"hidden_dropout_prob": 0.0,
|
|
9
|
-
"hidden_size": 768,
|
|
10
|
-
"id2label": {
|
|
11
|
-
"0": "0-2",
|
|
12
|
-
"1": "3-9",
|
|
13
|
-
"2": "10-19",
|
|
14
|
-
"3": "20-29",
|
|
15
|
-
"4": "30-39",
|
|
16
|
-
"5": "40-49",
|
|
17
|
-
"6": "50-59",
|
|
18
|
-
"7": "60-69",
|
|
19
|
-
"8": "more than 70"
|
|
20
|
-
},
|
|
21
|
-
"image_size": 224,
|
|
22
|
-
"initializer_range": 0.02,
|
|
23
|
-
"intermediate_size": 3072,
|
|
24
|
-
"label2id": {
|
|
25
|
-
"0-2": 0,
|
|
26
|
-
"3-9": 1,
|
|
27
|
-
"10-19": 2,
|
|
28
|
-
"20-29": 3,
|
|
29
|
-
"30-39": 4,
|
|
30
|
-
"40-49": 5,
|
|
31
|
-
"50-59": 6,
|
|
32
|
-
"60-69": 7,
|
|
33
|
-
"more than 70": 8
|
|
34
|
-
},
|
|
35
|
-
"layer_norm_eps": 1e-12,
|
|
36
|
-
"model_type": "vit",
|
|
37
|
-
"num_attention_heads": 12,
|
|
38
|
-
"num_channels": 3,
|
|
39
|
-
"num_hidden_layers": 12,
|
|
40
|
-
"patch_size": 16,
|
|
41
|
-
"transformers_version": "4.5.0.dev0"
|
|
42
|
-
}
|
|
@@ -1,23 +0,0 @@
|
|
|
1
|
-
name: "age_vit"
|
|
2
|
-
max_batch_size: 1
|
|
3
|
-
input {
|
|
4
|
-
name: "image"
|
|
5
|
-
data_type: TYPE_UINT8
|
|
6
|
-
dims: -1
|
|
7
|
-
dims: -1
|
|
8
|
-
dims: 3
|
|
9
|
-
}
|
|
10
|
-
output {
|
|
11
|
-
name: "softmax_predictions"
|
|
12
|
-
data_type: TYPE_FP32
|
|
13
|
-
dims: -1
|
|
14
|
-
label_filename: "labels.txt"
|
|
15
|
-
}
|
|
16
|
-
instance_group {
|
|
17
|
-
count: 1
|
|
18
|
-
kind: KIND_GPU
|
|
19
|
-
}
|
|
20
|
-
dynamic_batching {
|
|
21
|
-
max_queue_delay_microseconds: 500
|
|
22
|
-
}
|
|
23
|
-
backend: "python"
|
|
@@ -1,9 +0,0 @@
|
|
|
1
|
-
## Text Classification Triton Model Examples
|
|
2
|
-
|
|
3
|
-
These can be used on the fly with minimal or no changes to test deploy text classification models to the Clarifai platform. See the required files section for each model below.
|
|
4
|
-
|
|
5
|
-
* ### [XLM-Roberta Tweet Sentiment Classifier](./xlm-roberta/)
|
|
6
|
-
|
|
7
|
-
Required files to run tests locally:
|
|
8
|
-
|
|
9
|
-
* Download the [model checkpoint & sentencepiece bpe model from huggingface](https://huggingface.co/cardiffnlp/twitter-xlm-roberta-base-sentiment/tree/main) and store it under `xlm-roberta/1/twitter-xlm-roberta-base-sentiment/`
|
|
File without changes
|
|
@@ -1,55 +0,0 @@
|
|
|
1
|
-
# This file contains boilerplate code to allow users write their model
|
|
2
|
-
# inference code that will then interact with the Triton Inference Server
|
|
3
|
-
# Python backend to serve end user requests.
|
|
4
|
-
# The module name, module path, class name & get_predictions() method names MUST be maintained as is
|
|
5
|
-
# but other methods may be added within the class as deemed fit provided
|
|
6
|
-
# they are invoked within the main get_predictions() inference method
|
|
7
|
-
# if they play a role in any step of model inference
|
|
8
|
-
"""User model inference script."""
|
|
9
|
-
|
|
10
|
-
import os
|
|
11
|
-
from pathlib import Path
|
|
12
|
-
from typing import Callable
|
|
13
|
-
|
|
14
|
-
import torch
|
|
15
|
-
from scipy.special import softmax
|
|
16
|
-
from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
|
17
|
-
|
|
18
|
-
from clarifai.models.model_serving.models.model_types import text_classifier
|
|
19
|
-
from clarifai.models.model_serving.models.output import ClassifierOutput
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
class InferenceModel:
|
|
23
|
-
"""User model inference class."""
|
|
24
|
-
|
|
25
|
-
def __init__(self) -> None:
|
|
26
|
-
"""
|
|
27
|
-
Load inference time artifacts that are called frequently, e.g. models, tokenizers, etc.
|
|
28
|
-
in this method so they are loaded only once for faster inference.
|
|
29
|
-
"""
|
|
30
|
-
self.base_path: Path = os.path.dirname(__file__)
|
|
31
|
-
self.checkpoint_path: Path = os.path.join(self.base_path, "twitter-xlm-roberta-base-sentiment")
|
|
32
|
-
self.model: Callable = AutoModelForSequenceClassification.from_pretrained(self.checkpoint_path)
|
|
33
|
-
self.tokenizer: Callable = AutoTokenizer.from_pretrained(self.checkpoint_path)
|
|
34
|
-
self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
|
|
35
|
-
|
|
36
|
-
@text_classifier
|
|
37
|
-
def get_predictions(self, input_data) -> ClassifierOutput:
|
|
38
|
-
"""
|
|
39
|
-
Main model inference method.
|
|
40
|
-
|
|
41
|
-
Args:
|
|
42
|
-
-----
|
|
43
|
-
input_data: A single input data item to predict on.
|
|
44
|
-
Input data can be an image or text, etc depending on the model type.
|
|
45
|
-
|
|
46
|
-
Returns:
|
|
47
|
-
--------
|
|
48
|
-
One of the clarifai.models.model_serving.models.output types. Refer to the README/docs
|
|
49
|
-
"""
|
|
50
|
-
encoded_input = self.tokenizer(input_data, return_tensors='pt')
|
|
51
|
-
output = self.model(**encoded_input)
|
|
52
|
-
scores = output[0][0].detach().numpy()
|
|
53
|
-
scores = softmax(scores)
|
|
54
|
-
|
|
55
|
-
return ClassifierOutput(predicted_scores=scores)
|