clarifai-9.10.1-py3-none-any.whl → clarifai-9.10.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clarifai/client/__init__.py +3 -2
- clarifai/client/app.py +39 -23
- clarifai/client/base.py +6 -6
- clarifai/client/dataset.py +113 -55
- clarifai/client/input.py +47 -55
- clarifai/client/model.py +27 -25
- clarifai/client/module.py +13 -11
- clarifai/client/runner.py +5 -3
- clarifai/client/search.py +29 -10
- clarifai/client/user.py +14 -8
- clarifai/client/workflow.py +22 -20
- clarifai/constants/dataset.py +22 -0
- clarifai/datasets/upload/base.py +9 -7
- clarifai/datasets/upload/features.py +3 -3
- clarifai/datasets/upload/image.py +49 -50
- clarifai/datasets/upload/loaders/coco_captions.py +26 -80
- clarifai/datasets/upload/loaders/coco_detection.py +56 -115
- clarifai/datasets/upload/loaders/coco_segmentation.py +69 -137
- clarifai/datasets/upload/loaders/imagenet_classification.py +2 -3
- clarifai/datasets/upload/loaders/xview_detection.py +3 -3
- clarifai/datasets/upload/text.py +16 -16
- clarifai/datasets/upload/utils.py +196 -21
- clarifai/utils/misc.py +21 -0
- clarifai/versions.py +1 -1
- {clarifai-9.10.1.dist-info → clarifai-9.10.3.dist-info}/METADATA +3 -3
- clarifai-9.10.3.dist-info/RECORD +96 -0
- clarifai-9.10.3.dist-info/top_level.txt +1 -0
- clarifai/auth/__init__.py +0 -6
- clarifai/auth/helper.py +0 -367
- clarifai/auth/register.py +0 -23
- clarifai/auth/stub.py +0 -127
- clarifai/datasets/upload/examples/README.md +0 -31
- clarifai/datasets/upload/examples/image_classification/__init__.py +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/__init__.py +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/cifar_small_test.csv +0 -10
- clarifai/datasets/upload/examples/image_classification/cifar10/cifar_small_train.csv +0 -10
- clarifai/datasets/upload/examples/image_classification/cifar10/dataset.py +0 -46
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_700.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_701.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_702.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_703.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_704.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_705.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_706.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_707.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_708.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_709.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/__init__.py +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/dataset.py +0 -39
- clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/1420783.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/3287885.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/3617075.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/38052.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/39147.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/139558.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/1636096.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/2480925.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/3385808.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/3647386.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/1826869.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/2243245.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/259212.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/2842688.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/3035414.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/1545393.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/2427642.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/3520891.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/377566.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/503504.jpg +0 -0
- clarifai/datasets/upload/examples/text_classification/__init__.py +0 -0
- clarifai/datasets/upload/examples/text_classification/imdb_dataset/__init__.py +0 -0
- clarifai/datasets/upload/examples/text_classification/imdb_dataset/dataset.py +0 -42
- clarifai/datasets/upload/examples/text_classification/imdb_dataset/test.csv +0 -201
- clarifai/datasets/upload/examples/text_classification/imdb_dataset/train.csv +0 -201
- clarifai/datasets/upload/loaders/README.md +0 -49
- clarifai/models/model_serving/README.md +0 -155
- clarifai/models/model_serving/docs/custom_config.md +0 -33
- clarifai/models/model_serving/docs/dependencies.md +0 -11
- clarifai/models/model_serving/docs/inference_parameters.md +0 -134
- clarifai/models/model_serving/docs/model_types.md +0 -20
- clarifai/models/model_serving/docs/output.md +0 -28
- clarifai/models/model_serving/examples/README.md +0 -7
- clarifai/models/model_serving/examples/image_classification/README.md +0 -9
- clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/README.md +0 -11
- clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/config.json +0 -42
- clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/preprocessor_config.json +0 -15
- clarifai/models/model_serving/examples/image_classification/age_vit/config.pbtxt +0 -23
- clarifai/models/model_serving/examples/image_classification/age_vit/labels.txt +0 -9
- clarifai/models/model_serving/examples/image_classification/age_vit/requirements.txt +0 -7
- clarifai/models/model_serving/examples/text_classification/README.md +0 -9
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/README.md +0 -12
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/config.json +0 -34
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/special_tokens_map.json +0 -1
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/config.pbtxt +0 -21
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/labels.txt +0 -3
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +0 -7
- clarifai/models/model_serving/examples/text_embedding/README.md +0 -9
- clarifai/models/model_serving/examples/text_to_image/README.md +0 -9
- clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/__init__.py +0 -0
- clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/inference.py +0 -52
- clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/model.py +0 -60
- clarifai/models/model_serving/examples/text_to_image/sd-v1.5/config.pbtxt +0 -22
- clarifai/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +0 -6
- clarifai/models/model_serving/examples/text_to_text/README.md +0 -10
- clarifai/models/model_serving/examples/text_to_text/bart-summarize/config.pbtxt +0 -20
- clarifai/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +0 -4
- clarifai/models/model_serving/examples/visual_detection/README.md +0 -11
- clarifai/models/model_serving/examples/visual_detection/yolov5x/config.pbtxt +0 -36
- clarifai/models/model_serving/examples/visual_detection/yolov5x/labels.txt +0 -80
- clarifai/models/model_serving/examples/visual_detection/yolov5x/requirements.txt +0 -12
- clarifai/models/model_serving/examples/visual_embedding/README.md +0 -9
- clarifai/models/model_serving/examples/visual_embedding/vit-base/config.pbtxt +0 -22
- clarifai/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +0 -5
- clarifai/models/model_serving/examples/visual_segmentation/README.md +0 -9
- clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/config.pbtxt +0 -24
- clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/labels.txt +0 -18
- clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +0 -5
- clarifai/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +0 -24
- clarifai/models/model_serving/model_config/model_types_config/text-classifier.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/text-embedder.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml +0 -28
- clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +0 -18
- clarifai/modules/README.md +0 -5
- clarifai/modules/style.css +0 -217
- clarifai-9.10.1.dist-info/RECORD +0 -386
- clarifai-9.10.1.dist-info/top_level.txt +0 -2
- clarifai_utils/__init__.py +0 -0
- clarifai_utils/auth/__init__.py +0 -6
- clarifai_utils/auth/helper.py +0 -367
- clarifai_utils/auth/register.py +0 -23
- clarifai_utils/auth/stub.py +0 -127
- clarifai_utils/cli.py +0 -0
- clarifai_utils/client/__init__.py +0 -16
- clarifai_utils/client/app.py +0 -684
- clarifai_utils/client/auth/__init__.py +0 -4
- clarifai_utils/client/auth/helper.py +0 -367
- clarifai_utils/client/auth/register.py +0 -23
- clarifai_utils/client/auth/stub.py +0 -127
- clarifai_utils/client/base.py +0 -131
- clarifai_utils/client/dataset.py +0 -442
- clarifai_utils/client/input.py +0 -892
- clarifai_utils/client/lister.py +0 -54
- clarifai_utils/client/model.py +0 -575
- clarifai_utils/client/module.py +0 -94
- clarifai_utils/client/runner.py +0 -161
- clarifai_utils/client/search.py +0 -239
- clarifai_utils/client/user.py +0 -253
- clarifai_utils/client/workflow.py +0 -223
- clarifai_utils/constants/model.py +0 -4
- clarifai_utils/constants/search.py +0 -2
- clarifai_utils/datasets/__init__.py +0 -0
- clarifai_utils/datasets/export/__init__.py +0 -0
- clarifai_utils/datasets/export/inputs_annotations.py +0 -222
- clarifai_utils/datasets/upload/__init__.py +0 -0
- clarifai_utils/datasets/upload/base.py +0 -66
- clarifai_utils/datasets/upload/examples/README.md +0 -31
- clarifai_utils/datasets/upload/examples/image_classification/__init__.py +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/__init__.py +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/cifar_small_test.csv +0 -10
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/cifar_small_train.csv +0 -10
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/dataset.py +0 -46
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_700.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_701.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_702.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_703.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_704.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_705.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_706.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_707.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_708.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_709.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/__init__.py +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/dataset.py +0 -39
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/1420783.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/3287885.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/3617075.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/38052.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/39147.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/139558.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/1636096.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/2480925.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/3385808.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/3647386.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/1826869.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/2243245.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/259212.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/2842688.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/3035414.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/1545393.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/2427642.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/3520891.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/377566.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/503504.jpg +0 -0
- clarifai_utils/datasets/upload/examples/text_classification/__init__.py +0 -0
- clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/__init__.py +0 -0
- clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/dataset.py +0 -42
- clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/test.csv +0 -201
- clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/train.csv +0 -201
- clarifai_utils/datasets/upload/features.py +0 -44
- clarifai_utils/datasets/upload/image.py +0 -165
- clarifai_utils/datasets/upload/loaders/README.md +0 -49
- clarifai_utils/datasets/upload/loaders/__init__.py +0 -0
- clarifai_utils/datasets/upload/loaders/coco_captions.py +0 -103
- clarifai_utils/datasets/upload/loaders/coco_detection.py +0 -134
- clarifai_utils/datasets/upload/loaders/coco_segmentation.py +0 -166
- clarifai_utils/datasets/upload/loaders/imagenet_classification.py +0 -59
- clarifai_utils/datasets/upload/loaders/xview_detection.py +0 -148
- clarifai_utils/datasets/upload/text.py +0 -53
- clarifai_utils/datasets/upload/utils.py +0 -63
- clarifai_utils/errors.py +0 -89
- clarifai_utils/models/__init__.py +0 -0
- clarifai_utils/models/api.py +0 -283
- clarifai_utils/models/model_serving/README.md +0 -155
- clarifai_utils/models/model_serving/__init__.py +0 -12
- clarifai_utils/models/model_serving/cli/__init__.py +0 -12
- clarifai_utils/models/model_serving/cli/deploy_cli.py +0 -123
- clarifai_utils/models/model_serving/cli/model_zip.py +0 -61
- clarifai_utils/models/model_serving/cli/repository.py +0 -87
- clarifai_utils/models/model_serving/constants.py +0 -1
- clarifai_utils/models/model_serving/docs/custom_config.md +0 -33
- clarifai_utils/models/model_serving/docs/dependencies.md +0 -11
- clarifai_utils/models/model_serving/docs/inference_parameters.md +0 -134
- clarifai_utils/models/model_serving/docs/model_types.md +0 -20
- clarifai_utils/models/model_serving/docs/output.md +0 -28
- clarifai_utils/models/model_serving/examples/README.md +0 -7
- clarifai_utils/models/model_serving/examples/image_classification/README.md +0 -9
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/inference.py +0 -56
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/model.py +0 -61
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/README.md +0 -11
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/config.json +0 -42
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/preprocessor_config.json +0 -15
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/config.pbtxt +0 -23
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/labels.txt +0 -9
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/requirements.txt +0 -7
- clarifai_utils/models/model_serving/examples/text_classification/README.md +0 -9
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/inference.py +0 -55
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/model.py +0 -61
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/README.md +0 -12
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/config.json +0 -34
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/special_tokens_map.json +0 -1
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/config.pbtxt +0 -21
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/labels.txt +0 -3
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +0 -7
- clarifai_utils/models/model_serving/examples/text_embedding/README.md +0 -9
- clarifai_utils/models/model_serving/examples/text_to_image/README.md +0 -9
- clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/inference.py +0 -52
- clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/model.py +0 -60
- clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/config.pbtxt +0 -22
- clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +0 -6
- clarifai_utils/models/model_serving/examples/text_to_text/README.md +0 -10
- clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/inference.py +0 -47
- clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/model.py +0 -60
- clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/config.pbtxt +0 -20
- clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +0 -4
- clarifai_utils/models/model_serving/examples/visual_detection/README.md +0 -11
- clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/1/inference.py +0 -72
- clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/1/model.py +0 -61
- clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/config.pbtxt +0 -36
- clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/labels.txt +0 -80
- clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/requirements.txt +0 -12
- clarifai_utils/models/model_serving/examples/visual_embedding/README.md +0 -9
- clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/inference.py +0 -51
- clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/model.py +0 -60
- clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/config.pbtxt +0 -22
- clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +0 -5
- clarifai_utils/models/model_serving/examples/visual_segmentation/README.md +0 -9
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/inference.py +0 -55
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/model.py +0 -60
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/config.pbtxt +0 -24
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/labels.txt +0 -18
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +0 -5
- clarifai_utils/models/model_serving/model_config/__init__.py +0 -14
- clarifai_utils/models/model_serving/model_config/config.py +0 -302
- clarifai_utils/models/model_serving/model_config/inference_parameter.py +0 -124
- clarifai_utils/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +0 -24
- clarifai_utils/models/model_serving/model_config/model_types_config/text-classifier.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/text-embedder.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/text-to-image.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/text-to-text.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/visual-classifier.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/visual-detector.yaml +0 -28
- clarifai_utils/models/model_serving/model_config/model_types_config/visual-embedder.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/serializer.py +0 -134
- clarifai_utils/models/model_serving/models/__init__.py +0 -12
- clarifai_utils/models/model_serving/models/default_test.py +0 -275
- clarifai_utils/models/model_serving/models/inference.py +0 -42
- clarifai_utils/models/model_serving/models/model_types.py +0 -265
- clarifai_utils/models/model_serving/models/output.py +0 -124
- clarifai_utils/models/model_serving/models/pb_model.py +0 -74
- clarifai_utils/models/model_serving/models/test.py +0 -64
- clarifai_utils/models/model_serving/pb_model_repository.py +0 -101
- clarifai_utils/modules/README.md +0 -5
- clarifai_utils/modules/__init__.py +0 -0
- clarifai_utils/modules/css.py +0 -60
- clarifai_utils/modules/pages.py +0 -42
- clarifai_utils/modules/style.css +0 -217
- clarifai_utils/runners/__init__.py +0 -0
- clarifai_utils/runners/example.py +0 -33
- clarifai_utils/schema/search.py +0 -69
- clarifai_utils/urls/helper.py +0 -103
- clarifai_utils/utils/__init__.py +0 -0
- clarifai_utils/utils/logging.py +0 -90
- clarifai_utils/utils/misc.py +0 -33
- clarifai_utils/utils/model_train.py +0 -157
- clarifai_utils/versions.py +0 -6
- clarifai_utils/workflows/__init__.py +0 -0
- clarifai_utils/workflows/export.py +0 -68
- clarifai_utils/workflows/utils.py +0 -59
- clarifai_utils/workflows/validate.py +0 -67
- {clarifai-9.10.1.dist-info → clarifai-9.10.3.dist-info}/LICENSE +0 -0
- {clarifai-9.10.1.dist-info → clarifai-9.10.3.dist-info}/WHEEL +0 -0
- {clarifai-9.10.1.dist-info → clarifai-9.10.3.dist-info}/entry_points.txt +0 -0
clarifai/datasets/upload/loaders/coco_detection.py
CHANGED

@@ -1,134 +1,75 @@
-#! COCO
+#! COCO detection dataset

 import os
-import zipfile
-from glob import glob

-import cv2
-import requests
 from pycocotools.coco import COCO
-from tqdm import tqdm

-from
+from ..base import ClarifaiDataLoader

 from ..features import VisualDetectionFeatures


 class COCODetectionDataLoader(ClarifaiDataLoader):
-  """COCO 2017 Image Detection Dataset."""

-  def __init__(self,
+  def __init__(self, images_dir, label_filepath):
     """
[… 8 deleted lines not captured in this view …]
-        "val": "val2017.zip",
-        "annotations": "annotations_trainval2017.zip"
-    }
-    self.split = split
-    self.url = "http://images.cocodataset.org/zips/"  # coco base image-zip url
-    self.data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
-                                 "data")  # data storage directory
-    self.extracted_coco_dirs = {"train": None, "val": None, "annotations": None}
-
+    Args:
+      images_dir: Directory containing the images.
+      label_filepath: Path to the COCO annotation file.
+    """
+    self.images_dir = images_dir
+    self.label_filepath = label_filepath
+
+    self.map_ids = {}
     self.load_data()

[… 3 deleted lines not captured in this view …]
-      os.mkdir(save_dir)
-
-    #check if train*, val* and annotation* dirs exist
-    #so that the coco2017 data isn't downloaded
-    for key, filename in self.filenames.items():
-      existing_files = glob(f"{save_dir}/{key}*")
-      if existing_files:
-        print(f"{key} dataset already downloded and extracted")
-        continue
-
-      print("-" * 80)
-      print(f"Downloading {filename}")
-      print("-" * 80)
-
-      if "annotations" in filename:
-        self.url = "http://images.cocodataset.org/annotations/"
-
-      response = requests.get(self.url + filename, stream=True)
-      response.raise_for_status()
-      with open(os.path.join(save_dir, filename), "wb") as _file:
-        for chunk in tqdm(response.iter_content(chunk_size=5124000)):
-          if chunk:
-            _file.write(chunk)
-      print("Coco data download complete...")
-
-      #extract files
-      zf = zipfile.ZipFile(os.path.join(save_dir, filename))
-      print(f" Extracting {filename} file")
-      zf.extractall(path=save_dir)
-      # Delete coco zip
-      print(f" Deleting {filename}")
-      os.remove(path=os.path.join(save_dir, filename))
-
-  def load_data(self):
-    if isinstance(self.filenames, dict) and len(self.filenames) == 3:
-      self.coco_download(self.data_dir)
-      self.extracted_coco_dirs["train"] = [os.path.join(self.data_dir, i) \
-          for i in os.listdir(self.data_dir) if "train" in i][0]
-      self.extracted_coco_dirs["val"] = [os.path.join(self.data_dir, i) \
-          for i in os.listdir(self.data_dir) if "val" in i][0]
-
-      self.extracted_coco_dirs["annotations"] = [os.path.join(self.data_dir, i) \
-          for i in os.listdir(self.data_dir) if "annotations" in i][0]
-    else:
-      raise Exception(f"`filenames` must be a dict of atleast 2 coco zip file names; \
-          train, val and annotations. Found {len(self.filenames)} items instead.")
-
-    annot_file = glob(self.extracted_coco_dirs["annotations"] + "/" +\
-        f"instances_{self.split}*")[0]
-    self.coco = COCO(annot_file)
-    categories = self.coco.loadCats(self.coco.getCatIds())
-    self.cat_id_map = {category["id"]: category["name"] for category in categories}
-    self.cat_img_ids = {}
-    for cat_id in list(self.cat_id_map.keys()):
-      self.cat_img_ids[cat_id] = self.coco.getImgIds(catIds=[cat_id])
-
-    img_ids = []
-    for i in list(self.cat_img_ids.values()):
-      img_ids.extend(i)
-
-    self.img_ids = list(set(img_ids))
+  @property
+  def task(self):
+    return "visual_detection"

-  def
-
+  def load_data(self) -> None:
+    self.coco = COCO(self.label_filepath)
+    self.map_ids = {i: img_id for i, img_id in enumerate(list(self.coco.imgs.keys()))}

-  def __getitem__(self,
-
+  def __getitem__(self, index: int):
+    value = self.coco.imgs[self.map_ids[index]]
+    image_path = os.path.join(self.images_dir, value['file_name'])
     annots = []  # bboxes
[… 18 deleted lines not captured in this view …]
+    concept_ids = []
+
+    input_ann_ids = self.coco.getAnnIds(imgIds=[value['id']])
+    input_anns = self.coco.loadAnns(input_ann_ids)
+
+    for ann in input_anns:
+      # get concept info
+      # note1: concept_name can be human readable
+      # note2: concept_id can only be alphanumeric, up to 32 characters, with no special chars except `-` and `_`
+      concept_name = self.coco.cats[ann['category_id']]['name']
+      concept_id = concept_name.lower().replace(' ', '-')
+
+      # get bbox information
+      # note1: coco bboxes are `[x_min, y_min, width, height]` in pixels
+      # note2: clarifai bboxes are `[x_min, y_min, x_max, y_max]` normalized between 0-1.0
+      coco_bbox = ann['bbox']
+      clarifai_bbox = {
+          'left_col': max(0, coco_bbox[0] / value['width']),
+          'top_row': max(0, coco_bbox[1] / value['height']),
+          'right_col': min(1, (coco_bbox[0] + coco_bbox[2]) / value['width']),
+          'bottom_row': min(1, (coco_bbox[1] + coco_bbox[3]) / value['width'])
+      }
+      if (clarifai_bbox['left_col'] >=
+          clarifai_bbox['right_col']) or (clarifai_bbox['top_row'] >= clarifai_bbox['bottom_row']):
         continue
-
-
+      annots.append([
+          clarifai_bbox['left_col'], clarifai_bbox['top_row'], clarifai_bbox['right_col'],
+          clarifai_bbox['bottom_row']
+      ])
+      concept_ids.append(concept_id)
+
+    assert len(concept_ids) == len(annots), f"Num concepts must match num bbox annotations\
+    for a single image. Found {len(concept_ids)} concepts and {len(annots)} bboxes."

-    return VisualDetectionFeatures(image_path,
+    return VisualDetectionFeatures(image_path, concept_ids, annots, id=str(value['id']))
+
+  def __len__(self):
+    return len(self.coco.imgs)
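Net effect of this hunk: COCODetectionDataLoader no longer downloads and unzips COCO itself (the requests/zipfile/tqdm machinery is gone); callers now point it at a local image directory and annotation file, and load_data simply opens the annotations with pycocotools. A minimal usage sketch against the new constructor; the paths below are hypothetical, not taken from the diff:

from clarifai.datasets.upload.loaders.coco_detection import COCODetectionDataLoader

# Hypothetical local paths; as of 9.10.3 the loader no longer fetches COCO for you.
loader = COCODetectionDataLoader(
    images_dir="coco/val2017",
    label_filepath="coco/annotations/instances_val2017.json")

print(loader.task)  # "visual_detection"
print(len(loader))  # one entry per image in the annotation file
item = loader[0]    # VisualDetectionFeatures(image_path, concept_ids, annots, id=...)

One observation on the added code: bottom_row is normalized by value['width'], while the stated convention (rows and columns normalized to 0-1.0) suggests the divisor for a row coordinate would be the image height.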
clarifai/datasets/upload/loaders/coco_segmentation.py
CHANGED

@@ -2,16 +2,12 @@

 import gc
 import os
-import zipfile
 from functools import reduce
-from glob import glob

 import cv2
 import numpy as np
-import requests
 from pycocotools import mask as maskUtils
 from pycocotools.coco import COCO
-from tqdm import tqdm

 from clarifai.datasets.upload.base import ClarifaiDataLoader

@@ -19,148 +15,84 @@ from ..features import VisualSegmentationFeatures


 class COCOSegmentationDataLoader(ClarifaiDataLoader):
-  """COCO
+  """COCO Image Segmentation Dataset."""

-  def __init__(self,
+  def __init__(self, images_dir, label_filepath):
     """
-    Initialize coco dataset.
     Args:
-
-
-      split: "train" or "val"
+      images_dir: Directory containing the images.
+      label_filepath: Path to the COCO annotation file.
     """
-    self.
-
-        "val": "val2017.zip",
-        "annotations": "annotations_trainval2017.zip"
-    }
-    self.split = split
-    self.url = "http://images.cocodataset.org/zips/"  # coco base image-zip url
-    self.data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
-                                 "data")  # data storage dir
-    self.extracted_coco_dirs = {"train": None, "val": None, "annotations": None}
+    self.images_dir = images_dir
+    self.label_filepath = label_filepath

+    self.map_ids = {}
     self.load_data()

[… 7 deleted lines not captured in this view …]
-    for key, filename in self.filenames.items():
-      existing_files = glob(f"{save_dir}/{key}*")
-      if existing_files:
-        print(f"{key} dataset already downloded and extracted")
-        continue
-
-      print("-" * 80)
-      print(f"Downloading {filename}")
-      print("-" * 80)
-
-      if "annotations" in filename:
-        self.url = "http://images.cocodataset.org/annotations/"
-
-      response = requests.get(self.url + filename, stream=True)
-      response.raise_for_status()
-      with open(os.path.join(save_dir, filename), "wb") as _file:
-        for chunk in tqdm(response.iter_content(chunk_size=5124000)):
-          if chunk:
-            _file.write(chunk)
-      print("Coco data download complete...")
-
-      #extract files
-      zf = zipfile.ZipFile(os.path.join(save_dir, filename))
-      print(f" Extracting {filename} file")
-      zf.extractall(path=save_dir)
-      # Delete coco zip
-      print(f" Deleting {filename}")
-      os.remove(path=os.path.join(save_dir, filename))
-
-  def load_data(self):
-    """Load coco dataset image ids or filenames."""
-    if isinstance(self.filenames, dict) and len(self.filenames) == 3:
-      self.coco_download(self.data_dir)
-      self.extracted_coco_dirs["train"] = [os.path.join(self.data_dir, i) \
-          for i in os.listdir(self.data_dir) if "train" in i][0]
-      self.extracted_coco_dirs["val"] = [os.path.join(self.data_dir, i) \
-          for i in os.listdir(self.data_dir) if "val" in i][0]
-
-      self.extracted_coco_dirs["annotations"] = [os.path.join(self.data_dir, i) \
-          for i in os.listdir(self.data_dir) if "annotations" in i][0]
-    else:
-      raise Exception(f"`filenames` must be a dict of atleast 3 coco zip file names; \
-          train, val and annotations. Found {len(self.filenames)} items instead.")
-
-    annot_file = glob(self.extracted_coco_dirs["annotations"] + "/" + f"instances_{self.split}*")[
-        0]
-    self.coco = COCO(annot_file)
-    categories = self.coco.loadCats(self.coco.getCatIds())
-    self.cat_id_map = {category["id"]: category["name"] for category in categories}
-    self.cat_img_ids = {}
-    for cat_id in list(self.cat_id_map.keys()):
-      self.cat_img_ids[cat_id] = self.coco.getImgIds(catIds=[cat_id])
-
-    img_ids = set()
-    for i in list(self.cat_img_ids.values()):
-      img_ids.update(i)
-
-    self.img_ids = list(img_ids)
+  @property
+  def task(self):
+    return "visual_segmentation"
+
+  def load_data(self) -> None:
+    self.coco = COCO(self.label_filepath)
+    self.map_ids = {i: img_id for i, img_id in enumerate(list(self.coco.imgs.keys()))}

 def __len__(self):
-    return len(self.
+    return len(self.coco.imgs)

-  def __getitem__(self,
+  def __getitem__(self, index):
     """Get image and annotations for a given index."""
-
+    value = self.coco.imgs[self.map_ids[index]]
+    image_path = os.path.join(self.images_dir, value['file_name'])
     annots = []  # polygons
[… 49 deleted lines not captured in this view …]
+    concept_ids = []
+
+    input_ann_ids = self.coco.getAnnIds(imgIds=[value['id']])
+    input_anns = self.coco.loadAnns(input_ann_ids)
+
+    for ann in input_anns:
+      # get concept info
+      # note1: concept_name can be human readable
+      # note2: concept_id can only be alphanumeric, up to 32 characters, with no special chars except `-` and `_`
+      concept_name = self.coco.cats[ann['category_id']]['name']
+      concept_id = concept_name.lower().replace(' ', '-')
+
+      # get polygons
+      if isinstance(ann['segmentation'], list):
+        poly = np.array(ann['segmentation']).reshape((int(len(ann['segmentation'][0]) / 2),
+                                                      2)).astype(float)
+        poly[:, 0], poly[:, 1] = poly[:, 0] / value['width'], poly[:, 1] / value['height']
+        poly = np.clip(poly, 0, 1)
+        annots.append(poly.tolist())  #[[x=col, y=row],...]
+        concept_ids.append(concept_id)
+      else:  # seg: {"counts":[...]}
+        if isinstance(ann['segmentation']['counts'], list):
+          rle = maskUtils.frPyObjects([ann['segmentation']], value['height'], value['width'])
+        else:
+          rle = ann['segmentation']
+        mask = maskUtils.decode(rle)  #binary mask
+        #convert mask to polygons and add to annots
+        contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+        polygons = []
+        for cont in contours:
+          if cont.size >= 6:
+            polygons.append(cont.astype(float).flatten().tolist())
+        # store polygons in (x,y) pairs
+        polygons_flattened = reduce(lambda x, y: x + y, polygons)
+        del polygons
+        del contours
+        del mask
+        gc.collect()
+
+        polygons = np.array(polygons_flattened).reshape((int(len(polygons_flattened) / 2),
+                                                         2)).astype(float)
+        polygons[:, 0] = polygons[:, 0] / value['width']
+        polygons[:, 1] = polygons[:, 1] / value['height']
+        polygons = np.clip(polygons, 0, 1)
+        annots.append(polygons.tolist())  #[[x=col, y=row],...,[x=col, y=row]]
+        concept_ids.append(concept_id)
+
+    assert len(concept_ids) == len(annots), f"Num concepts must match num bbox annotations\
+    for a single image. Found {len(concept_ids)} concepts and {len(annots)} bboxes."
+
+    return VisualSegmentationFeatures(image_path, concept_ids, annots, id=str(value['id']))
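The segmentation loader gets the same constructor change, and __getitem__ now handles both COCO segmentation encodings in place: polygon lists are normalized directly, while RLE masks ({"counts": ...}) are decoded with pycocotools and traced back into polygons with cv2.findContours. A parallel usage sketch, again with hypothetical paths:

from clarifai.datasets.upload.loaders.coco_segmentation import COCOSegmentationDataLoader

loader = COCOSegmentationDataLoader(
    images_dir="coco/val2017",                                 # hypothetical path
    label_filepath="coco/annotations/instances_val2017.json")  # hypothetical path

item = loader[0]
# -> VisualSegmentationFeatures(image_path, concept_ids, annots, id=...), where each
#    entry of annots is a list of [x, y] points normalized to 0-1.0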
clarifai/datasets/upload/loaders/imagenet_classification.py
CHANGED

@@ -9,7 +9,7 @@ from ..features import VisualClassificationFeatures
 class ImageNetDataLoader(ClarifaiDataLoader):
   """ImageNet Dataset."""

-  def __init__(self, split: str = "train"):
+  def __init__(self, data_dir, split: str = "train"):
     """
     Initialize dataset params.
     Args:
@@ -17,8 +17,7 @@ class ImageNetDataLoader(ClarifaiDataLoader):
       split: "train" or "test"
     """
     self.split = split
-    self.data_dir =
-                                 "data")  # data storage directory
+    self.data_dir = data_dir
     self.label_map = dict()
     self.concepts = []
     self.image_paths = []

clarifai/datasets/upload/loaders/xview_detection.py
CHANGED

@@ -31,13 +31,13 @@ class xviewDetectionDataLoader(ClarifaiDataLoader):
       'Shipping container lot', 'Shipping Container', 'Pylon', 'Tower'
   ]

-  def __init__(self,
+  def __init__(self, data_dir) -> None:
     """Initialize and Compress xview dataset.
     Args:
-
+      data_dir: the local dataset directory.
     """

-    self.data_dir =
+    self.data_dir = data_dir
     self.img_dir = os.path.join(self.data_dir, "train_images")
     self.img_comp_dir = os.path.join(self.data_dir, "train_images_comp")
     self.label_file = os.path.join(self.data_dir, "xview_train.geojson")
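The ImageNet and xView loaders follow the same pattern: the hard-coded "data" directory next to the module is replaced by a caller-supplied data_dir. A sketch with hypothetical paths:

from clarifai.datasets.upload.loaders.imagenet_classification import ImageNetDataLoader
from clarifai.datasets.upload.loaders.xview_detection import xviewDetectionDataLoader

imagenet = ImageNetDataLoader(data_dir="/data/imagenet", split="train")
# per the context lines above, the xview directory is expected to contain
# train_images/ and xview_train.geojson
xview = xviewDetectionDataLoader(data_dir="/data/xview")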
clarifai/datasets/upload/text.py
CHANGED
@@ -1,17 +1,19 @@
 from concurrent.futures import ThreadPoolExecutor
-from typing import
+from typing import List, Tuple, Type

 from clarifai_grpc.grpc.api import resources_pb2
 from google.protobuf.struct_pb2 import Struct

-from .
+from clarifai.client.input import Inputs
+
+from .base import ClarifaiDataLoader, ClarifaiDataset


 class TextClassificationDataset(ClarifaiDataset):
   """Upload text classification datasets to clarifai datasets"""

-  def __init__(self,
-    super().__init__(
+  def __init__(self, data_generator: Type[ClarifaiDataLoader], dataset_id: str) -> None:
+    super().__init__(data_generator, dataset_id)

   def _extract_protos(self, batch_input_ids: List[int]
                      ) -> Tuple[List[resources_pb2.Input], List[resources_pb2.Annotation]]:
@@ -24,21 +26,19 @@ class TextClassificationDataset(ClarifaiDataset):
     """
     input_protos, annotation_protos = [], []

-    def
-
+    def process_data_item(id):
+      data_item = self.data_generator[id]
       metadata = Struct()
-      text =
-      labels =
-
-      input_id = f"{self.dataset_id}-{
-      if
-        metadata.update(
-      else:
-        metadata.update({"split": self.split})
+      text = data_item.text
+      labels = data_item.labels if isinstance(data_item.labels,
+                                              list) else [data_item.labels]  # clarifai concept
+      input_id = f"{self.dataset_id}-{id}" if data_item.id is None else f"{self.dataset_id}-{str(data_item.id)}"
+      if data_item.metadata is not None:
+        metadata.update(data_item.metadata)

       self.all_input_ids[id] = input_id
       input_protos.append(
-
+          Inputs.get_text_input(
               input_id=input_id,
               raw_text=text,
               dataset_id=self.dataset_id,
@@ -46,7 +46,7 @@ class TextClassificationDataset(ClarifaiDataset):
               metadata=metadata))

     with ThreadPoolExecutor(max_workers=4) as executor:
-      futures = [executor.submit(
+      futures = [executor.submit(process_data_item, id) for id in batch_input_ids]
       for job in futures:
         job.result()
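Two things change here: the input proto is now built through the Inputs client (clarifai.client.input) instead of a module-local helper, and metadata comes from the data item itself rather than a stamped "split" key. A sketch of the proto construction in isolation, using only the parameters visible in this hunk (the hunk elides whatever sits between dataset_id and metadata in the call, and the ids and text below are made up):

from clarifai.client.input import Inputs
from google.protobuf.struct_pb2 import Struct

metadata = Struct()
metadata.update({"split": "train"})  # arbitrary example metadata

proto = Inputs.get_text_input(
    input_id="my-dataset-0",  # hypothetical input id
    raw_text="A short text sample.",
    dataset_id="my-dataset",  # hypothetical dataset id
    metadata=metadata)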