clarifai 9.10.2__py3-none-any.whl → 9.10.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clarifai/client/__init__.py +3 -2
- clarifai/client/app.py +39 -23
- clarifai/client/base.py +6 -6
- clarifai/client/dataset.py +113 -55
- clarifai/client/input.py +47 -55
- clarifai/client/model.py +27 -25
- clarifai/client/module.py +13 -11
- clarifai/client/runner.py +5 -3
- clarifai/client/search.py +7 -3
- clarifai/client/user.py +14 -8
- clarifai/client/workflow.py +22 -20
- clarifai/constants/dataset.py +22 -0
- clarifai/datasets/upload/base.py +9 -7
- clarifai/datasets/upload/features.py +3 -3
- clarifai/datasets/upload/image.py +49 -50
- clarifai/datasets/upload/loaders/coco_captions.py +26 -80
- clarifai/datasets/upload/loaders/coco_detection.py +56 -115
- clarifai/datasets/upload/loaders/coco_segmentation.py +69 -137
- clarifai/datasets/upload/loaders/imagenet_classification.py +2 -3
- clarifai/datasets/upload/loaders/xview_detection.py +3 -3
- clarifai/datasets/upload/text.py +16 -16
- clarifai/datasets/upload/utils.py +196 -21
- clarifai/utils/misc.py +21 -0
- clarifai/versions.py +1 -1
- {clarifai-9.10.2.dist-info → clarifai-9.10.3.dist-info}/METADATA +3 -3
- clarifai-9.10.3.dist-info/RECORD +96 -0
- clarifai-9.10.3.dist-info/top_level.txt +1 -0
- clarifai/auth/__init__.py +0 -6
- clarifai/auth/helper.py +0 -367
- clarifai/auth/register.py +0 -23
- clarifai/auth/stub.py +0 -127
- clarifai/datasets/upload/examples/README.md +0 -31
- clarifai/datasets/upload/examples/image_classification/__init__.py +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/__init__.py +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/cifar_small_test.csv +0 -10
- clarifai/datasets/upload/examples/image_classification/cifar10/cifar_small_train.csv +0 -10
- clarifai/datasets/upload/examples/image_classification/cifar10/dataset.py +0 -46
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_700.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_701.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_702.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_703.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_704.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_705.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_706.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_707.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_708.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_709.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/__init__.py +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/dataset.py +0 -39
- clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/1420783.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/3287885.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/3617075.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/38052.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/39147.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/139558.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/1636096.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/2480925.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/3385808.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/3647386.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/1826869.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/2243245.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/259212.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/2842688.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/3035414.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/1545393.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/2427642.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/3520891.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/377566.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/503504.jpg +0 -0
- clarifai/datasets/upload/examples/text_classification/__init__.py +0 -0
- clarifai/datasets/upload/examples/text_classification/imdb_dataset/__init__.py +0 -0
- clarifai/datasets/upload/examples/text_classification/imdb_dataset/dataset.py +0 -42
- clarifai/datasets/upload/examples/text_classification/imdb_dataset/test.csv +0 -201
- clarifai/datasets/upload/examples/text_classification/imdb_dataset/train.csv +0 -201
- clarifai/datasets/upload/loaders/README.md +0 -49
- clarifai/models/model_serving/README.md +0 -155
- clarifai/models/model_serving/docs/custom_config.md +0 -33
- clarifai/models/model_serving/docs/dependencies.md +0 -11
- clarifai/models/model_serving/docs/inference_parameters.md +0 -134
- clarifai/models/model_serving/docs/model_types.md +0 -20
- clarifai/models/model_serving/docs/output.md +0 -28
- clarifai/models/model_serving/examples/README.md +0 -7
- clarifai/models/model_serving/examples/image_classification/README.md +0 -9
- clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/README.md +0 -11
- clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/config.json +0 -42
- clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/preprocessor_config.json +0 -15
- clarifai/models/model_serving/examples/image_classification/age_vit/config.pbtxt +0 -23
- clarifai/models/model_serving/examples/image_classification/age_vit/labels.txt +0 -9
- clarifai/models/model_serving/examples/image_classification/age_vit/requirements.txt +0 -7
- clarifai/models/model_serving/examples/text_classification/README.md +0 -9
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/README.md +0 -12
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/config.json +0 -34
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/special_tokens_map.json +0 -1
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/config.pbtxt +0 -21
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/labels.txt +0 -3
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +0 -7
- clarifai/models/model_serving/examples/text_embedding/README.md +0 -9
- clarifai/models/model_serving/examples/text_to_image/README.md +0 -9
- clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/__init__.py +0 -0
- clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/inference.py +0 -52
- clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/model.py +0 -60
- clarifai/models/model_serving/examples/text_to_image/sd-v1.5/config.pbtxt +0 -22
- clarifai/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +0 -6
- clarifai/models/model_serving/examples/text_to_text/README.md +0 -10
- clarifai/models/model_serving/examples/text_to_text/bart-summarize/config.pbtxt +0 -20
- clarifai/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +0 -4
- clarifai/models/model_serving/examples/visual_detection/README.md +0 -11
- clarifai/models/model_serving/examples/visual_detection/yolov5x/config.pbtxt +0 -36
- clarifai/models/model_serving/examples/visual_detection/yolov5x/labels.txt +0 -80
- clarifai/models/model_serving/examples/visual_detection/yolov5x/requirements.txt +0 -12
- clarifai/models/model_serving/examples/visual_embedding/README.md +0 -9
- clarifai/models/model_serving/examples/visual_embedding/vit-base/config.pbtxt +0 -22
- clarifai/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +0 -5
- clarifai/models/model_serving/examples/visual_segmentation/README.md +0 -9
- clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/config.pbtxt +0 -24
- clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/labels.txt +0 -18
- clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +0 -5
- clarifai/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +0 -24
- clarifai/models/model_serving/model_config/model_types_config/text-classifier.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/text-embedder.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml +0 -28
- clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +0 -18
- clarifai/modules/README.md +0 -5
- clarifai/modules/style.css +0 -217
- clarifai-9.10.2.dist-info/RECORD +0 -386
- clarifai-9.10.2.dist-info/top_level.txt +0 -2
- clarifai_utils/__init__.py +0 -0
- clarifai_utils/auth/__init__.py +0 -6
- clarifai_utils/auth/helper.py +0 -367
- clarifai_utils/auth/register.py +0 -23
- clarifai_utils/auth/stub.py +0 -127
- clarifai_utils/cli.py +0 -0
- clarifai_utils/client/__init__.py +0 -16
- clarifai_utils/client/app.py +0 -684
- clarifai_utils/client/auth/__init__.py +0 -4
- clarifai_utils/client/auth/helper.py +0 -367
- clarifai_utils/client/auth/register.py +0 -23
- clarifai_utils/client/auth/stub.py +0 -127
- clarifai_utils/client/base.py +0 -131
- clarifai_utils/client/dataset.py +0 -442
- clarifai_utils/client/input.py +0 -892
- clarifai_utils/client/lister.py +0 -54
- clarifai_utils/client/model.py +0 -575
- clarifai_utils/client/module.py +0 -94
- clarifai_utils/client/runner.py +0 -161
- clarifai_utils/client/search.py +0 -254
- clarifai_utils/client/user.py +0 -253
- clarifai_utils/client/workflow.py +0 -223
- clarifai_utils/constants/model.py +0 -4
- clarifai_utils/constants/search.py +0 -2
- clarifai_utils/datasets/__init__.py +0 -0
- clarifai_utils/datasets/export/__init__.py +0 -0
- clarifai_utils/datasets/export/inputs_annotations.py +0 -222
- clarifai_utils/datasets/upload/__init__.py +0 -0
- clarifai_utils/datasets/upload/base.py +0 -66
- clarifai_utils/datasets/upload/examples/README.md +0 -31
- clarifai_utils/datasets/upload/examples/image_classification/__init__.py +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/__init__.py +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/cifar_small_test.csv +0 -10
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/cifar_small_train.csv +0 -10
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/dataset.py +0 -46
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_700.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_701.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_702.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_703.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_704.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_705.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_706.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_707.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_708.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_709.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/__init__.py +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/dataset.py +0 -39
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/1420783.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/3287885.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/3617075.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/38052.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/39147.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/139558.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/1636096.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/2480925.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/3385808.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/3647386.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/1826869.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/2243245.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/259212.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/2842688.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/3035414.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/1545393.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/2427642.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/3520891.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/377566.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/503504.jpg +0 -0
- clarifai_utils/datasets/upload/examples/text_classification/__init__.py +0 -0
- clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/__init__.py +0 -0
- clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/dataset.py +0 -42
- clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/test.csv +0 -201
- clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/train.csv +0 -201
- clarifai_utils/datasets/upload/features.py +0 -44
- clarifai_utils/datasets/upload/image.py +0 -165
- clarifai_utils/datasets/upload/loaders/README.md +0 -49
- clarifai_utils/datasets/upload/loaders/__init__.py +0 -0
- clarifai_utils/datasets/upload/loaders/coco_captions.py +0 -103
- clarifai_utils/datasets/upload/loaders/coco_detection.py +0 -134
- clarifai_utils/datasets/upload/loaders/coco_segmentation.py +0 -166
- clarifai_utils/datasets/upload/loaders/imagenet_classification.py +0 -59
- clarifai_utils/datasets/upload/loaders/xview_detection.py +0 -148
- clarifai_utils/datasets/upload/text.py +0 -53
- clarifai_utils/datasets/upload/utils.py +0 -63
- clarifai_utils/errors.py +0 -89
- clarifai_utils/models/__init__.py +0 -0
- clarifai_utils/models/api.py +0 -283
- clarifai_utils/models/model_serving/README.md +0 -155
- clarifai_utils/models/model_serving/__init__.py +0 -12
- clarifai_utils/models/model_serving/cli/__init__.py +0 -12
- clarifai_utils/models/model_serving/cli/deploy_cli.py +0 -123
- clarifai_utils/models/model_serving/cli/model_zip.py +0 -61
- clarifai_utils/models/model_serving/cli/repository.py +0 -87
- clarifai_utils/models/model_serving/constants.py +0 -1
- clarifai_utils/models/model_serving/docs/custom_config.md +0 -33
- clarifai_utils/models/model_serving/docs/dependencies.md +0 -11
- clarifai_utils/models/model_serving/docs/inference_parameters.md +0 -134
- clarifai_utils/models/model_serving/docs/model_types.md +0 -20
- clarifai_utils/models/model_serving/docs/output.md +0 -28
- clarifai_utils/models/model_serving/examples/README.md +0 -7
- clarifai_utils/models/model_serving/examples/image_classification/README.md +0 -9
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/inference.py +0 -56
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/model.py +0 -61
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/README.md +0 -11
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/config.json +0 -42
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/preprocessor_config.json +0 -15
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/config.pbtxt +0 -23
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/labels.txt +0 -9
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/requirements.txt +0 -7
- clarifai_utils/models/model_serving/examples/text_classification/README.md +0 -9
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/inference.py +0 -55
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/model.py +0 -61
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/README.md +0 -12
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/config.json +0 -34
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/special_tokens_map.json +0 -1
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/config.pbtxt +0 -21
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/labels.txt +0 -3
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +0 -7
- clarifai_utils/models/model_serving/examples/text_embedding/README.md +0 -9
- clarifai_utils/models/model_serving/examples/text_to_image/README.md +0 -9
- clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/inference.py +0 -52
- clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/model.py +0 -60
- clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/config.pbtxt +0 -22
- clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +0 -6
- clarifai_utils/models/model_serving/examples/text_to_text/README.md +0 -10
- clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/inference.py +0 -47
- clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/model.py +0 -60
- clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/config.pbtxt +0 -20
- clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +0 -4
- clarifai_utils/models/model_serving/examples/visual_detection/README.md +0 -11
- clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/1/inference.py +0 -72
- clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/1/model.py +0 -61
- clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/config.pbtxt +0 -36
- clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/labels.txt +0 -80
- clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/requirements.txt +0 -12
- clarifai_utils/models/model_serving/examples/visual_embedding/README.md +0 -9
- clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/inference.py +0 -51
- clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/model.py +0 -60
- clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/config.pbtxt +0 -22
- clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +0 -5
- clarifai_utils/models/model_serving/examples/visual_segmentation/README.md +0 -9
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/inference.py +0 -55
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/model.py +0 -60
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/config.pbtxt +0 -24
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/labels.txt +0 -18
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +0 -5
- clarifai_utils/models/model_serving/model_config/__init__.py +0 -14
- clarifai_utils/models/model_serving/model_config/config.py +0 -302
- clarifai_utils/models/model_serving/model_config/inference_parameter.py +0 -124
- clarifai_utils/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +0 -24
- clarifai_utils/models/model_serving/model_config/model_types_config/text-classifier.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/text-embedder.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/text-to-image.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/text-to-text.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/visual-classifier.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/visual-detector.yaml +0 -28
- clarifai_utils/models/model_serving/model_config/model_types_config/visual-embedder.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/serializer.py +0 -134
- clarifai_utils/models/model_serving/models/__init__.py +0 -12
- clarifai_utils/models/model_serving/models/default_test.py +0 -275
- clarifai_utils/models/model_serving/models/inference.py +0 -42
- clarifai_utils/models/model_serving/models/model_types.py +0 -265
- clarifai_utils/models/model_serving/models/output.py +0 -124
- clarifai_utils/models/model_serving/models/pb_model.py +0 -74
- clarifai_utils/models/model_serving/models/test.py +0 -64
- clarifai_utils/models/model_serving/pb_model_repository.py +0 -101
- clarifai_utils/modules/README.md +0 -5
- clarifai_utils/modules/__init__.py +0 -0
- clarifai_utils/modules/css.py +0 -60
- clarifai_utils/modules/pages.py +0 -42
- clarifai_utils/modules/style.css +0 -217
- clarifai_utils/runners/__init__.py +0 -0
- clarifai_utils/runners/example.py +0 -33
- clarifai_utils/schema/search.py +0 -69
- clarifai_utils/urls/helper.py +0 -103
- clarifai_utils/utils/__init__.py +0 -0
- clarifai_utils/utils/logging.py +0 -90
- clarifai_utils/utils/misc.py +0 -33
- clarifai_utils/utils/model_train.py +0 -157
- clarifai_utils/versions.py +0 -6
- clarifai_utils/workflows/__init__.py +0 -0
- clarifai_utils/workflows/export.py +0 -68
- clarifai_utils/workflows/utils.py +0 -59
- clarifai_utils/workflows/validate.py +0 -67
- {clarifai-9.10.2.dist-info → clarifai-9.10.3.dist-info}/LICENSE +0 -0
- {clarifai-9.10.2.dist-info → clarifai-9.10.3.dist-info}/WHEEL +0 -0
- {clarifai-9.10.2.dist-info → clarifai-9.10.3.dist-info}/entry_points.txt +0 -0
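Most of the churn above is consolidation: the duplicated `clarifai_utils` top-level package and the legacy `clarifai/auth` modules are deleted outright, leaving `clarifai.client` as the single import surface, while `clarifai/datasets/upload` (notably `utils.py`, +196/-21) absorbs the loader logic. A hedged before/after sketch of the import migration; only the module paths appear in this diff, and the `User(...)` keyword arguments are an assumption:

```python
# Hypothetical migration sketch for the removals listed above.
# Only the module paths are taken from this diff; the User(...) signature
# is an assumption about the retained clarifai.client API.

# Before (both trees deleted in 9.10.3):
#   from clarifai_utils.client.user import User
#   import clarifai.auth

# After, via the clarifai.client tree this release keeps:
from clarifai.client.user import User

user = User(user_id="me", pat="YOUR_PAT")  # placeholder credentials
```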

clarifai/models/model_serving/examples/text_embedding/README.md
DELETED

@@ -1,9 +0,0 @@
-## Text Embedding Triton Model Examples
-
-These can be used on the fly with minimal or no changes to test deploy text embedding models to the Clarifai platform. See the required files section for each model below.
-
-* ### [Instructor-xl](https://huggingface.co/hkunlp/instructor-xl)
-
-Requirements to run tests locally:
-
-* Download/Clone the [huggingface model](https://huggingface.co/hkunlp/instructor-xl) into the **instructor-xl/1/** directory then start the triton server.

clarifai/models/model_serving/examples/text_to_image/README.md
DELETED

@@ -1,9 +0,0 @@
-## Text to Image Triton Model Examples
-
-These can be used on the fly with minimal or no changes to test deploy text to image models to the Clarifai platform. See the required files section for each model below.
-
-* ### [sd-v1.5 (Stable-Diffusion-v1.5)](./sd-v1.5/)
-
-Requirements to run tests locally:
-
-* Download/Clone the [huggingface model](https://huggingface.co/runwayml/stable-diffusion-v1-5) into the **sd-v1.5/1/** directory then start the triton server.

clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/__init__.py
File without changes
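The "Download/Clone the huggingface model" step in the README above is easy to script. A minimal sketch, assuming a recent `huggingface_hub` client and the folder name that the deleted `inference.py` below joins onto its own directory:

```python
# Hedged sketch of the README's download step. huggingface_hub and the
# local_dir layout are assumptions; the repo id is the one linked above.
from huggingface_hub import snapshot_download

snapshot_download(
    repo_id="runwayml/stable-diffusion-v1-5",
    local_dir="sd-v1.5/1/stable-diffusion-v1-5",  # folder inference.py expects
)
```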

clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/inference.py
DELETED

@@ -1,52 +0,0 @@
-# This file contains boilerplate code to allow users write their model
-# inference code that will then interact with the Triton Inference Server
-# Python backend to serve end user requests.
-# The module name, module path, class name & get_predictions() method names MUST be maintained as is
-# but other methods may be added within the class as deemed fit provided
-# they are invoked within the main get_predictions() inference method
-# if they play a role in any step of model inference
-"""User model inference script."""
-
-import os
-from pathlib import Path
-
-import numpy as np
-import torch
-from diffusers import StableDiffusionPipeline
-
-from clarifai.models.model_serving.models.model_types import text_to_image
-from clarifai.models.model_serving.models.output import ImageOutput
-
-
-class InferenceModel:
-  """User model inference class."""
-
-  def __init__(self) -> None:
-    """
-    Load inference time artifacts that are called frequently .e.g. models, tokenizers, etc.
-    in this method so they are loaded only once for faster inference.
-    """
-    self.base_path: Path = os.path.dirname(__file__)
-    self.huggingface_model_path = os.path.join(self.base_path, "stable-diffusion-v1-5")
-    self.device = "cuda" if torch.cuda.is_available() else "cpu"
-    self.pipeline = StableDiffusionPipeline.from_pretrained(
-        self.huggingface_model_path, torch_dtype=torch.float16)
-    self.pipeline = self.pipeline.to(self.device)
-
-  @text_to_image
-  def get_predictions(self, input_data):
-    """
-    Main model inference method.
-
-    Args:
-    -----
-    input_data: A single input data item to predict on.
-      Input data can be an image or text, etc depending on the model type.
-
-    Returns:
-    --------
-    One of the clarifai.models.model_serving.models.output types. Refer to the README/docs
-    """
-    out_image = self.pipeline(input_data).images[0]
-    out_image = np.asarray(out_image)
-    return ImageOutput(image=out_image)
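The file above is the entire user-facing contract of the (now removed) serving scaffold: keep the module path, class name, and `get_predictions()` name, decorate with the model-type wrapper, and return one of the `output` types. For contrast, a toy sketch of the same contract for a text-to-text model; the `text_to_text` decorator and the `TextOutput` field name are inferred from the model-type configs later in this diff, not confirmed by it:

```python
# Hypothetical minimal inference.py for a text-to-text model, following the
# same boilerplate contract. text_to_text and TextOutput(predicted_text=...)
# are inferred from the model_types_config files shown later in this diff.
from clarifai.models.model_serving.models.model_types import text_to_text
from clarifai.models.model_serving.models.output import TextOutput


class InferenceModel:
  """User model inference class (toy echo model, for illustration only)."""

  @text_to_text
  def get_predictions(self, input_data):
    # A real model would tokenize and generate here; this echoes the input.
    return TextOutput(predicted_text=str(input_data))
```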

clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/model.py
DELETED

@@ -1,60 +0,0 @@
-# Copyright 2023 Clarifai, Inc.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Triton inference server Python Backend Model."""
-
-import os
-import sys
-
-try:
-  import triton_python_backend_utils as pb_utils
-except ModuleNotFoundError:
-  pass
-from google.protobuf import text_format
-from tritonclient.grpc.model_config_pb2 import ModelConfig
-
-
-class TritonPythonModel:
-  """
-  Triton Python BE Model.
-  """
-
-  def initialize(self, args):
-    """
-    Triton server init.
-    """
-    args["model_repository"] = args["model_repository"].replace("/1/model.py", "")
-    sys.path.append(os.path.dirname(__file__))
-    from inference import InferenceModel
-
-    self.inference_obj = InferenceModel()
-
-    # Read input_name from config file
-    self.config_msg = ModelConfig()
-    with open(os.path.join(args["model_repository"], "config.pbtxt"), "r") as f:
-      cfg = f.read()
-    text_format.Merge(cfg, self.config_msg)
-    self.input_name = [inp.name for inp in self.config_msg.input][0]
-
-  def execute(self, requests):
-    """
-    Serve model inference requests.
-    """
-    responses = []
-
-    for request in requests:
-      in_batch = pb_utils.get_input_tensor_by_name(request, self.input_name)
-      in_batch = in_batch.as_numpy()
-      inference_response = self.inference_obj.get_predictions(in_batch)
-      responses.append(inference_response)
-
-    return responses

clarifai/models/model_serving/examples/text_to_image/sd-v1.5/config.pbtxt
DELETED

@@ -1,22 +0,0 @@
-name: "sd-v1.5"
-max_batch_size: 1
-input {
-  name: "text"
-  data_type: TYPE_STRING
-  dims: 1
-}
-output {
-  name: "image"
-  data_type: TYPE_UINT8
-  dims: -1
-  dims: -1
-  dims: 3
-}
-instance_group {
-  count: 1
-  kind: KIND_GPU
-}
-dynamic_batching {
-  max_queue_delay_microseconds: 500
-}
-backend: "python"
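This is the file the deleted `model.py` above parses during `initialize()`. A short sketch of that step in isolation, using the same `text_format` and `ModelConfig` calls; the path is illustrative:

```python
# Mirrors how the deleted model.py reads config.pbtxt at startup.
from google.protobuf import text_format
from tritonclient.grpc.model_config_pb2 import ModelConfig

config = ModelConfig()
with open("sd-v1.5/config.pbtxt", "r") as f:  # illustrative path
    text_format.Merge(f.read(), config)

print(config.name)                         # "sd-v1.5"
print([inp.name for inp in config.input])  # ["text"], model.py's input_name
```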

clarifai/models/model_serving/examples/text_to_text/README.md
DELETED

@@ -1,10 +0,0 @@
-## Text-to-Text Triton Model Examples
-
-These can be used on the fly with minimal or no changes to test deploy all models that take a text input and yield a text output prediction e.g. text generation, summarization and translation models to the Clarifai platform. See the required files section for each model below.
-
-* ### [Bart-paper2slides-summarizer](https://huggingface.co/com3dian/Bart-large-paper2slides-summarizer)
-
-Requirements to run tests locally:
-
-* Download/Clone the [huggingface model](https://huggingface.co/com3dian/Bart-large-paper2slides-summarizer) and store it under the **bart-summarize/1/** directory.
-* Rename the downloaded folder to **bart-large-summarizer** OR change the **self.huggingface_model_path** attribute in the [inference.py script](./bart-summarize/1/inference.py) to match the folder name

clarifai/models/model_serving/examples/text_to_text/bart-summarize/config.pbtxt
DELETED

@@ -1,20 +0,0 @@
-name: "bart-summarize"
-max_batch_size: 1
-input {
-  name: "text"
-  data_type: TYPE_STRING
-  dims: 1
-}
-output {
-  name: "text"
-  data_type: TYPE_STRING
-  dims: 1
-}
-instance_group {
-  count: 1
-  kind: KIND_GPU
-}
-dynamic_batching {
-  max_queue_delay_microseconds: 500
-}
-backend: "python"

clarifai/models/model_serving/examples/visual_detection/README.md
DELETED

@@ -1,11 +0,0 @@
-## Visual Detection Triton Model Examples
-
-These can be used on the fly with minimal or no changes to test deploy visual detection models to the Clarifai platform. See the required files section for each model below.
-
-* ### [Yolov5x](./yolov5x/)
-
-Required files (not included here due to upload size limits):
-
-* Download the yolov5x folder from above.
-* Download the `Yolov5 repo` and the `yolov5-x checkpoint` and store them under the `1/` directory of the yolov5x folder.
-* zip and test deploy to your Clarifai app

clarifai/models/model_serving/examples/visual_detection/yolov5x/config.pbtxt
DELETED

@@ -1,36 +0,0 @@
-name: "yolov5_test"
-max_batch_size: 1
-input {
-  name: "image"
-  data_type: TYPE_UINT8
-  dims: -1
-  dims: -1
-  dims: 3
-}
-output {
-  name: "predicted_bboxes"
-  data_type: TYPE_FP32
-  dims: -1
-  dims: 4
-}
-output {
-  name: "predicted_labels"
-  data_type: TYPE_INT32
-  dims: -1
-  dims: 1
-  label_filename: "labels.txt"
-}
-output {
-  name: "predicted_scores"
-  data_type: TYPE_FP32
-  dims: -1
-  dims: 1
-}
-instance_group {
-  count: 1
-  kind: KIND_GPU
-}
-dynamic_batching {
-  max_queue_delay_microseconds: 500
-}
-backend: "python"
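The three `output` blocks above fix the per-image detector contract: an Nx4 box array, Nx1 label indices into `labels.txt`, and Nx1 scores. A sketch of a conforming return value, assuming the `VisualDetectorOutput` type named in `visual-detector.yaml` further down; the arrays are dummies:

```python
# Dummy value shaped to the three outputs declared in the config above.
# VisualDetectorOutput and its field names are assumed from visual-detector.yaml.
import numpy as np
from clarifai.models.model_serving.models.output import VisualDetectorOutput

out = VisualDetectorOutput(
    predicted_bboxes=np.zeros((1, 4), dtype=np.float32),  # one dummy box
    predicted_labels=np.zeros((1, 1), dtype=np.int32),    # index into labels.txt
    predicted_scores=np.ones((1, 1), dtype=np.float32),   # confidence per box
)
```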

clarifai/models/model_serving/examples/visual_detection/yolov5x/labels.txt
DELETED

@@ -1,80 +0,0 @@
-person
-bicycle
-car
-motorcycle
-airplane
-bus
-train
-truck
-boat
-traffic-light
-fire-hydrant
-stop-sign
-parking-meter
-bench
-bird
-cat
-dog
-horse
-sheep
-cow
-elephant
-bear
-zebra
-giraffe
-backpack
-umbrella
-handbag
-tie
-suitcase
-frisbee
-skis
-snowboard
-sports-ball
-kite
-baseball-bat
-baseball-glove
-skateboard
-surfboard
-tennis-racket
-bottle
-wine-glass
-cup
-fork
-knife
-spoon
-bowl
-banana
-apple
-sandwich
-orange
-broccoli
-carrot
-hot-dog
-pizza
-donut
-cake
-chair
-couch
-potted-plant
-bed
-dining-table
-toilet
-tv
-laptop
-mouse
-remote
-keyboard
-cell-phone
-microwave
-oven
-toaster
-sink
-refrigerator
-book
-clock
-vase
-scissors
-teddy-bear
-hair-drier
-toothbrush

clarifai/models/model_serving/examples/visual_detection/yolov5x/requirements.txt
DELETED

@@ -1,12 +0,0 @@
-# YOLOv5 requirements
-tritonclient[all]
-clarifai>9.5.3  # for model upload features
-matplotlib>=3.2.2
-opencv-python>=4.1.1
-Pillow>=7.1.2
-PyYAML>=5.3.1
-torch>=1.7.0,<2.0
-torchvision>=0.8.1
-protobuf<4.21.3  # https://github.com/ultralytics/yolov5/issues/8012
-pandas>=1.1.4
-seaborn>=0.11.0

clarifai/models/model_serving/examples/visual_embedding/README.md
DELETED

@@ -1,9 +0,0 @@
-## Visual Embedding Triton Model Examples
-
-These can be used on the fly with minimal or no changes to test deploy visual embedding models to the Clarifai platform. See the required files section for each model below.
-
-* ### [vit-base](./vit-base/)
-
-Requirements to run tests locally:
-
-* Download/Clone the [huggingface model](https://huggingface.co/google/vit-base-patch16-224) into the **vit-base/1/** directory then start the triton server.

clarifai/models/model_serving/examples/visual_embedding/vit-base/config.pbtxt
DELETED

@@ -1,22 +0,0 @@
-name: "vit-base"
-max_batch_size: 1
-input {
-  name: "image"
-  data_type: TYPE_UINT8
-  dims: -1
-  dims: -1
-  dims: 3
-}
-output {
-  name: "embeddings"
-  data_type: TYPE_FP32
-  dims: -1
-}
-instance_group {
-  count: 1
-  kind: KIND_GPU
-}
-dynamic_batching {
-  max_queue_delay_microseconds: 500
-}
-backend: "python"

clarifai/models/model_serving/examples/visual_segmentation/README.md
DELETED

@@ -1,9 +0,0 @@
-## Visual Segmentation Triton Model Examples
-
-These can be used on the fly with minimal or no changes to test deploy visual segmentation models to the Clarifai platform. See the required files section for each model below.
-
-* ### [segformer-b2](./segformer-b2/)
-
-Requirements to run tests locally:
-
-* Download/Clone the [huggingface model](https://huggingface.co/mattmdjaga/segformer_b2_clothes) into the **segformer-b2/1/** directory then start the triton server.

clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/config.pbtxt
DELETED

@@ -1,24 +0,0 @@
-name: "segformer-b2"
-max_batch_size: 1
-input {
-  name: "image"
-  data_type: TYPE_UINT8
-  dims: -1
-  dims: -1
-  dims: 3
-}
-output {
-  name: "predicted_mask"
-  data_type: TYPE_INT64
-  dims: -1
-  dims: -1
-  label_filename: "labels.txt"
-}
-instance_group {
-  count: 1
-  kind: KIND_GPU
-}
-dynamic_batching {
-  max_queue_delay_microseconds: 500
-}
-backend: "python"

clarifai/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml
DELETED

@@ -1,24 +0,0 @@
-triton:
-  input:
-  - name: image
-    data_type: TYPE_UINT8
-    dims: [-1, -1, 3]
-    optional: true
-  - name: text
-    data_type: TYPE_STRING
-    dims: [1]
-    optional: true
-  output:
-  - name: embeddings
-    data_type: TYPE_FP32
-    dims: [-1]
-    labels: false
-inference:
-  wrap_func: multimodal_embedder
-  return_type: EmbeddingOutput
-field_maps:
-  input_fields_map:
-    image: image
-    text: text
-  output_fields_map:
-    embeddings: embeddings
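These `model_types_config` YAML files pair a Triton input/output signature with a wrapper function and a Clarifai field mapping; they are presumably consumed by the `model_config/config.py` and `serializer.py` modules deleted in this same release. A minimal sketch of reading one, assuming only the key layout visible in this diff:

```python
# Reads the multimodal-embedder spec above; the key layout is assumed from
# this diff's reconstruction, not from clarifai's own loader.
import yaml

with open("multimodal-embedder.yaml") as f:
    spec = yaml.safe_load(f)

print(spec["inference"]["wrap_func"])                # multimodal_embedder
print([i["name"] for i in spec["triton"]["input"]])  # ['image', 'text']
```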

clarifai/models/model_serving/model_config/model_types_config/text-classifier.yaml
DELETED

@@ -1,18 +0,0 @@
-triton:
-  input:
-  - name: text
-    data_type: TYPE_STRING
-    dims: [1]
-  output:
-  - name: softmax_predictions
-    data_type: TYPE_FP32
-    dims: [-1]
-    labels: true
-inference:
-  wrap_func: text_classifier
-  return_type: ClassifierOutput
-field_maps:
-  input_fields_map:
-    text: text
-  output_fields_map:
-    concepts: softmax_predictions

clarifai/models/model_serving/model_config/model_types_config/text-embedder.yaml
DELETED

@@ -1,18 +0,0 @@
-triton:
-  input:
-  - name: text
-    data_type: TYPE_STRING
-    dims: [1]
-  output:
-  - name: embeddings
-    data_type: TYPE_FP32
-    dims: [-1]
-    labels: false
-inference:
-  wrap_func: text_embedder
-  return_type: EmbeddingOutput
-field_maps:
-  input_fields_map:
-    text: text
-  output_fields_map:
-    embeddings: embeddings

clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml
DELETED

@@ -1,18 +0,0 @@
-triton:
-  input:
-  - name: text
-    data_type: TYPE_STRING
-    dims: [1]
-  output:
-  - name: image
-    data_type: TYPE_UINT8
-    dims: [-1, -1, 3]
-    labels: false
-inference:
-  wrap_func: text_to_image
-  return_type: ImageOutput
-field_maps:
-  input_fields_map:
-    text: text
-  output_fields_map:
-    image: image

clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml
DELETED

@@ -1,18 +0,0 @@
-triton:
-  input:
-  - name: text
-    data_type: TYPE_STRING
-    dims: [1]
-  output:
-  - name: text
-    data_type: TYPE_STRING
-    dims: [1]
-    labels: false
-inference:
-  wrap_func: text_to_text
-  return_type: TextOutput
-field_maps:
-  input_fields_map:
-    text: text
-  output_fields_map:
-    text: text

clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml
DELETED

@@ -1,18 +0,0 @@
-triton:
-  input:
-  - name: image
-    data_type: TYPE_UINT8
-    dims: [-1, -1, 3]
-  output:
-  - name: softmax_predictions
-    data_type: TYPE_FP32
-    dims: [-1]
-    labels: true
-inference:
-  wrap_func: visual_classifier
-  return_type: ClassifierOutput
-field_maps:
-  input_fields_map:
-    image: image
-  output_fields_map:
-    concepts: softmax_predictions

clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml
DELETED

@@ -1,28 +0,0 @@
-triton:
-  input:
-  - name: image
-    data_type: TYPE_UINT8
-    dims: [-1, -1, 3]
-  output:
-  - name: predicted_bboxes
-    data_type: TYPE_FP32
-    dims: [-1, 4]
-    labels: false
-  - name: predicted_labels
-    data_type: TYPE_INT32
-    dims: [-1, 1]
-    labels: true
-  - name: predicted_scores
-    data_type: TYPE_FP32
-    dims: [-1, 1]
-    labels: false
-inference:
-  wrap_func: visual_detector
-  return_type: VisualDetectorOutput
-field_maps:
-  input_fields_map:
-    image: image
-  output_fields_map:
-    "regions[...].region_info.bounding_box": "predicted_bboxes"
-    "regions[...].data.concepts[...].id": "predicted_labels"
-    "regions[...].data.concepts[...].value": "predicted_scores"

clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml
DELETED

@@ -1,18 +0,0 @@
-triton:
-  input:
-  - name: image
-    data_type: TYPE_UINT8
-    dims: [-1, -1, 3]
-  output:
-  - name: embeddings
-    data_type: TYPE_FP32
-    dims: [-1]
-    labels: false
-inference:
-  wrap_func: visual_embedder
-  return_type: EmbeddingOutput
-field_maps:
-  input_fields_map:
-    image: image
-  output_fields_map:
-    embeddings: embeddings

clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml
DELETED

@@ -1,18 +0,0 @@
-triton:
-  input:
-  - name: image
-    data_type: TYPE_UINT8
-    dims: [-1, -1, 3]
-  output:
-  - name: predicted_mask
-    data_type: TYPE_INT64
-    dims: [-1, -1]
-    labels: true
-inference:
-  wrap_func: visual_segmenter
-  return_type: MasksOutput
-field_maps:
-  input_fields_map:
-    image: image
-  output_fields_map:
-    "regions[...].region_info.mask,regions[...].data.concepts": "predicted_mask"

clarifai/modules/README.md
DELETED

@@ -1,5 +0,0 @@
-# Module Utils
-
-Additional helper functions for creating Clarifai Modules should be placed here so that they can be reused across modules.
-
-This should still not import streamlit as we want to keep clarifai-python-utils lightweight. If you find we need utilities for streamlit itself we should start a new repo for that. Please contact support@clarifai.com to do so.