clarifai 9.10.2__py3-none-any.whl → 9.10.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clarifai/client/__init__.py +3 -2
- clarifai/client/app.py +39 -23
- clarifai/client/base.py +6 -6
- clarifai/client/dataset.py +113 -55
- clarifai/client/input.py +47 -55
- clarifai/client/model.py +27 -25
- clarifai/client/module.py +13 -11
- clarifai/client/runner.py +5 -3
- clarifai/client/search.py +7 -3
- clarifai/client/user.py +14 -8
- clarifai/client/workflow.py +22 -20
- clarifai/constants/dataset.py +22 -0
- clarifai/datasets/upload/base.py +9 -7
- clarifai/datasets/upload/features.py +3 -3
- clarifai/datasets/upload/image.py +49 -50
- clarifai/datasets/upload/loaders/coco_captions.py +26 -80
- clarifai/datasets/upload/loaders/coco_detection.py +56 -115
- clarifai/datasets/upload/loaders/coco_segmentation.py +69 -137
- clarifai/datasets/upload/loaders/imagenet_classification.py +2 -3
- clarifai/datasets/upload/loaders/xview_detection.py +3 -3
- clarifai/datasets/upload/text.py +16 -16
- clarifai/datasets/upload/utils.py +196 -21
- clarifai/utils/misc.py +21 -0
- clarifai/versions.py +1 -1
- {clarifai-9.10.2.dist-info → clarifai-9.10.3.dist-info}/METADATA +3 -3
- clarifai-9.10.3.dist-info/RECORD +96 -0
- clarifai-9.10.3.dist-info/top_level.txt +1 -0
- clarifai/auth/__init__.py +0 -6
- clarifai/auth/helper.py +0 -367
- clarifai/auth/register.py +0 -23
- clarifai/auth/stub.py +0 -127
- clarifai/datasets/upload/examples/README.md +0 -31
- clarifai/datasets/upload/examples/image_classification/__init__.py +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/__init__.py +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/cifar_small_test.csv +0 -10
- clarifai/datasets/upload/examples/image_classification/cifar10/cifar_small_train.csv +0 -10
- clarifai/datasets/upload/examples/image_classification/cifar10/dataset.py +0 -46
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_700.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_701.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_702.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_703.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_704.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_705.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_706.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_707.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_708.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/images/test_batch_709.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/__init__.py +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/dataset.py +0 -39
- clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/1420783.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/3287885.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/3617075.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/38052.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/beignets/39147.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/139558.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/1636096.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/2480925.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/3385808.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/hamburger/3647386.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/1826869.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/2243245.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/259212.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/2842688.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/prime_rib/3035414.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/1545393.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/2427642.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/3520891.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/377566.jpg +0 -0
- clarifai/datasets/upload/examples/image_classification/food-101/images/ramen/503504.jpg +0 -0
- clarifai/datasets/upload/examples/text_classification/__init__.py +0 -0
- clarifai/datasets/upload/examples/text_classification/imdb_dataset/__init__.py +0 -0
- clarifai/datasets/upload/examples/text_classification/imdb_dataset/dataset.py +0 -42
- clarifai/datasets/upload/examples/text_classification/imdb_dataset/test.csv +0 -201
- clarifai/datasets/upload/examples/text_classification/imdb_dataset/train.csv +0 -201
- clarifai/datasets/upload/loaders/README.md +0 -49
- clarifai/models/model_serving/README.md +0 -155
- clarifai/models/model_serving/docs/custom_config.md +0 -33
- clarifai/models/model_serving/docs/dependencies.md +0 -11
- clarifai/models/model_serving/docs/inference_parameters.md +0 -134
- clarifai/models/model_serving/docs/model_types.md +0 -20
- clarifai/models/model_serving/docs/output.md +0 -28
- clarifai/models/model_serving/examples/README.md +0 -7
- clarifai/models/model_serving/examples/image_classification/README.md +0 -9
- clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/README.md +0 -11
- clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/config.json +0 -42
- clarifai/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/preprocessor_config.json +0 -15
- clarifai/models/model_serving/examples/image_classification/age_vit/config.pbtxt +0 -23
- clarifai/models/model_serving/examples/image_classification/age_vit/labels.txt +0 -9
- clarifai/models/model_serving/examples/image_classification/age_vit/requirements.txt +0 -7
- clarifai/models/model_serving/examples/text_classification/README.md +0 -9
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/README.md +0 -12
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/config.json +0 -34
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/special_tokens_map.json +0 -1
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/config.pbtxt +0 -21
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/labels.txt +0 -3
- clarifai/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +0 -7
- clarifai/models/model_serving/examples/text_embedding/README.md +0 -9
- clarifai/models/model_serving/examples/text_to_image/README.md +0 -9
- clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/__init__.py +0 -0
- clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/inference.py +0 -52
- clarifai/models/model_serving/examples/text_to_image/sd-v1.5/1/model.py +0 -60
- clarifai/models/model_serving/examples/text_to_image/sd-v1.5/config.pbtxt +0 -22
- clarifai/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +0 -6
- clarifai/models/model_serving/examples/text_to_text/README.md +0 -10
- clarifai/models/model_serving/examples/text_to_text/bart-summarize/config.pbtxt +0 -20
- clarifai/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +0 -4
- clarifai/models/model_serving/examples/visual_detection/README.md +0 -11
- clarifai/models/model_serving/examples/visual_detection/yolov5x/config.pbtxt +0 -36
- clarifai/models/model_serving/examples/visual_detection/yolov5x/labels.txt +0 -80
- clarifai/models/model_serving/examples/visual_detection/yolov5x/requirements.txt +0 -12
- clarifai/models/model_serving/examples/visual_embedding/README.md +0 -9
- clarifai/models/model_serving/examples/visual_embedding/vit-base/config.pbtxt +0 -22
- clarifai/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +0 -5
- clarifai/models/model_serving/examples/visual_segmentation/README.md +0 -9
- clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/config.pbtxt +0 -24
- clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/labels.txt +0 -18
- clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +0 -5
- clarifai/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +0 -24
- clarifai/models/model_serving/model_config/model_types_config/text-classifier.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/text-embedder.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml +0 -28
- clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml +0 -18
- clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +0 -18
- clarifai/modules/README.md +0 -5
- clarifai/modules/style.css +0 -217
- clarifai-9.10.2.dist-info/RECORD +0 -386
- clarifai-9.10.2.dist-info/top_level.txt +0 -2
- clarifai_utils/__init__.py +0 -0
- clarifai_utils/auth/__init__.py +0 -6
- clarifai_utils/auth/helper.py +0 -367
- clarifai_utils/auth/register.py +0 -23
- clarifai_utils/auth/stub.py +0 -127
- clarifai_utils/cli.py +0 -0
- clarifai_utils/client/__init__.py +0 -16
- clarifai_utils/client/app.py +0 -684
- clarifai_utils/client/auth/__init__.py +0 -4
- clarifai_utils/client/auth/helper.py +0 -367
- clarifai_utils/client/auth/register.py +0 -23
- clarifai_utils/client/auth/stub.py +0 -127
- clarifai_utils/client/base.py +0 -131
- clarifai_utils/client/dataset.py +0 -442
- clarifai_utils/client/input.py +0 -892
- clarifai_utils/client/lister.py +0 -54
- clarifai_utils/client/model.py +0 -575
- clarifai_utils/client/module.py +0 -94
- clarifai_utils/client/runner.py +0 -161
- clarifai_utils/client/search.py +0 -254
- clarifai_utils/client/user.py +0 -253
- clarifai_utils/client/workflow.py +0 -223
- clarifai_utils/constants/model.py +0 -4
- clarifai_utils/constants/search.py +0 -2
- clarifai_utils/datasets/__init__.py +0 -0
- clarifai_utils/datasets/export/__init__.py +0 -0
- clarifai_utils/datasets/export/inputs_annotations.py +0 -222
- clarifai_utils/datasets/upload/__init__.py +0 -0
- clarifai_utils/datasets/upload/base.py +0 -66
- clarifai_utils/datasets/upload/examples/README.md +0 -31
- clarifai_utils/datasets/upload/examples/image_classification/__init__.py +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/__init__.py +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/cifar_small_test.csv +0 -10
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/cifar_small_train.csv +0 -10
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/dataset.py +0 -46
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_700.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_701.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_702.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_703.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_704.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_705.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_706.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_707.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_708.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/images/test_batch_709.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/__init__.py +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/dataset.py +0 -39
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/1420783.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/3287885.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/3617075.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/38052.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/beignets/39147.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/139558.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/1636096.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/2480925.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/3385808.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/hamburger/3647386.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/1826869.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/2243245.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/259212.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/2842688.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/prime_rib/3035414.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/1545393.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/2427642.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/3520891.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/377566.jpg +0 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/images/ramen/503504.jpg +0 -0
- clarifai_utils/datasets/upload/examples/text_classification/__init__.py +0 -0
- clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/__init__.py +0 -0
- clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/dataset.py +0 -42
- clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/test.csv +0 -201
- clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/train.csv +0 -201
- clarifai_utils/datasets/upload/features.py +0 -44
- clarifai_utils/datasets/upload/image.py +0 -165
- clarifai_utils/datasets/upload/loaders/README.md +0 -49
- clarifai_utils/datasets/upload/loaders/__init__.py +0 -0
- clarifai_utils/datasets/upload/loaders/coco_captions.py +0 -103
- clarifai_utils/datasets/upload/loaders/coco_detection.py +0 -134
- clarifai_utils/datasets/upload/loaders/coco_segmentation.py +0 -166
- clarifai_utils/datasets/upload/loaders/imagenet_classification.py +0 -59
- clarifai_utils/datasets/upload/loaders/xview_detection.py +0 -148
- clarifai_utils/datasets/upload/text.py +0 -53
- clarifai_utils/datasets/upload/utils.py +0 -63
- clarifai_utils/errors.py +0 -89
- clarifai_utils/models/__init__.py +0 -0
- clarifai_utils/models/api.py +0 -283
- clarifai_utils/models/model_serving/README.md +0 -155
- clarifai_utils/models/model_serving/__init__.py +0 -12
- clarifai_utils/models/model_serving/cli/__init__.py +0 -12
- clarifai_utils/models/model_serving/cli/deploy_cli.py +0 -123
- clarifai_utils/models/model_serving/cli/model_zip.py +0 -61
- clarifai_utils/models/model_serving/cli/repository.py +0 -87
- clarifai_utils/models/model_serving/constants.py +0 -1
- clarifai_utils/models/model_serving/docs/custom_config.md +0 -33
- clarifai_utils/models/model_serving/docs/dependencies.md +0 -11
- clarifai_utils/models/model_serving/docs/inference_parameters.md +0 -134
- clarifai_utils/models/model_serving/docs/model_types.md +0 -20
- clarifai_utils/models/model_serving/docs/output.md +0 -28
- clarifai_utils/models/model_serving/examples/README.md +0 -7
- clarifai_utils/models/model_serving/examples/image_classification/README.md +0 -9
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/inference.py +0 -56
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/model.py +0 -61
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/README.md +0 -11
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/config.json +0 -42
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/vit-age-classifier/preprocessor_config.json +0 -15
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/config.pbtxt +0 -23
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/labels.txt +0 -9
- clarifai_utils/models/model_serving/examples/image_classification/age_vit/requirements.txt +0 -7
- clarifai_utils/models/model_serving/examples/text_classification/README.md +0 -9
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/inference.py +0 -55
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/model.py +0 -61
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/README.md +0 -12
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/config.json +0 -34
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/twitter-xlm-roberta-base-sentiment/special_tokens_map.json +0 -1
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/config.pbtxt +0 -21
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/labels.txt +0 -3
- clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +0 -7
- clarifai_utils/models/model_serving/examples/text_embedding/README.md +0 -9
- clarifai_utils/models/model_serving/examples/text_to_image/README.md +0 -9
- clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/inference.py +0 -52
- clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/1/model.py +0 -60
- clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/config.pbtxt +0 -22
- clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +0 -6
- clarifai_utils/models/model_serving/examples/text_to_text/README.md +0 -10
- clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/inference.py +0 -47
- clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/model.py +0 -60
- clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/config.pbtxt +0 -20
- clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +0 -4
- clarifai_utils/models/model_serving/examples/visual_detection/README.md +0 -11
- clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/1/inference.py +0 -72
- clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/1/model.py +0 -61
- clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/config.pbtxt +0 -36
- clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/labels.txt +0 -80
- clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/requirements.txt +0 -12
- clarifai_utils/models/model_serving/examples/visual_embedding/README.md +0 -9
- clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/inference.py +0 -51
- clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/model.py +0 -60
- clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/config.pbtxt +0 -22
- clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +0 -5
- clarifai_utils/models/model_serving/examples/visual_segmentation/README.md +0 -9
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/inference.py +0 -55
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/model.py +0 -60
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/config.pbtxt +0 -24
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/labels.txt +0 -18
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +0 -5
- clarifai_utils/models/model_serving/model_config/__init__.py +0 -14
- clarifai_utils/models/model_serving/model_config/config.py +0 -302
- clarifai_utils/models/model_serving/model_config/inference_parameter.py +0 -124
- clarifai_utils/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +0 -24
- clarifai_utils/models/model_serving/model_config/model_types_config/text-classifier.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/text-embedder.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/text-to-image.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/text-to-text.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/visual-classifier.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/visual-detector.yaml +0 -28
- clarifai_utils/models/model_serving/model_config/model_types_config/visual-embedder.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +0 -18
- clarifai_utils/models/model_serving/model_config/serializer.py +0 -134
- clarifai_utils/models/model_serving/models/__init__.py +0 -12
- clarifai_utils/models/model_serving/models/default_test.py +0 -275
- clarifai_utils/models/model_serving/models/inference.py +0 -42
- clarifai_utils/models/model_serving/models/model_types.py +0 -265
- clarifai_utils/models/model_serving/models/output.py +0 -124
- clarifai_utils/models/model_serving/models/pb_model.py +0 -74
- clarifai_utils/models/model_serving/models/test.py +0 -64
- clarifai_utils/models/model_serving/pb_model_repository.py +0 -101
- clarifai_utils/modules/README.md +0 -5
- clarifai_utils/modules/__init__.py +0 -0
- clarifai_utils/modules/css.py +0 -60
- clarifai_utils/modules/pages.py +0 -42
- clarifai_utils/modules/style.css +0 -217
- clarifai_utils/runners/__init__.py +0 -0
- clarifai_utils/runners/example.py +0 -33
- clarifai_utils/schema/search.py +0 -69
- clarifai_utils/urls/helper.py +0 -103
- clarifai_utils/utils/__init__.py +0 -0
- clarifai_utils/utils/logging.py +0 -90
- clarifai_utils/utils/misc.py +0 -33
- clarifai_utils/utils/model_train.py +0 -157
- clarifai_utils/versions.py +0 -6
- clarifai_utils/workflows/__init__.py +0 -0
- clarifai_utils/workflows/export.py +0 -68
- clarifai_utils/workflows/utils.py +0 -59
- clarifai_utils/workflows/validate.py +0 -67
- {clarifai-9.10.2.dist-info → clarifai-9.10.3.dist-info}/LICENSE +0 -0
- {clarifai-9.10.2.dist-info → clarifai-9.10.3.dist-info}/WHEEL +0 -0
- {clarifai-9.10.2.dist-info → clarifai-9.10.3.dist-info}/entry_points.txt +0 -0
clarifai/client/workflow.py
CHANGED

```diff
@@ -18,16 +18,17 @@ class Workflow(Lister, BaseClient):
   """Workflow is a class that provides access to Clarifai API endpoints related to Workflow information."""

   def __init__(self,
-               url_init: str = "",
-               workflow_id: str = "",
+               url: str = None,
+               workflow_id: str = None,
                workflow_version: Dict = {'id': ""},
                output_config: Dict = {'min_value': 0},
                base_url: str = "https://api.clarifai.com",
+               pat: str = None,
                **kwargs):
     """Initializes a Workflow object.

     Args:
-        url_init (str): The URL to initialize the workflow object.
+        url (str): The URL to initialize the workflow object.
         workflow_id (str): The Workflow ID to interact with.
         workflow_version (dict): The Workflow Version to interact with.
         output_config (dict): The output config to interact with.
@@ -38,20 +39,20 @@ class Workflow(Lister, BaseClient):
         base_url (str): Base API url. Default "https://api.clarifai.com"
         **kwargs: Additional keyword arguments to be passed to the Workflow.
     """
-    if url_init != "" and workflow_id != "":
-      raise UserError("You can only specify one of url_init or workflow_id.")
-    if url_init == "" and workflow_id == "":
-      raise UserError("You must specify one of url_init or workflow_id.")
-    if url_init != "":
+    if url and workflow_id:
+      raise UserError("You can only specify one of url or workflow_id.")
+    if not url and not workflow_id:
+      raise UserError("You must specify one of url or workflow_id.")
+    if url:
       user_id, app_id, _, workflow_id, workflow_version_id = ClarifaiUrlHelper.split_clarifai_url(
-          url_init)
+          url)
       workflow_version = {'id': workflow_version_id}
       kwargs = {'user_id': user_id, 'app_id': app_id}
     self.kwargs = {**kwargs, 'id': workflow_id, 'version': workflow_version}
     self.output_config = output_config
     self.workflow_info = resources_pb2.Workflow(**self.kwargs)
     self.logger = get_logger(logger_level="INFO")
-    BaseClient.__init__(self, user_id=self.user_id, app_id=self.app_id, base=base_url)
+    BaseClient.__init__(self, user_id=self.user_id, app_id=self.app_id, base=base_url, pat=pat)
     Lister.__init__(self)

   def predict(self, inputs: List[Input]):
@@ -84,7 +85,7 @@ class Workflow(Lister, BaseClient):

     Example:
         >>> from clarifai.client.workflow import Workflow
-        >>> workflow = Workflow("url_init")
+        >>> workflow = Workflow("url") # Example: https://clarifai.com/clarifai/main/workflows/Face-Sentiment
         or
         >>> workflow = Workflow(user_id='user_id', app_id='app_id', workflow_id='workflow_id')
         >>> workflow_prediction = workflow.predict_by_filepath('filepath', 'image')
@@ -112,13 +113,13 @@ class Workflow(Lister, BaseClient):
       raise UserError('Invalid bytes.')

     if input_type == "image":
-      input_proto = Inputs().get_input_from_bytes("", image_bytes=input_bytes)
+      input_proto = Inputs.get_input_from_bytes("", image_bytes=input_bytes)
     elif input_type == "text":
-      input_proto = Inputs().get_input_from_bytes("", text_bytes=input_bytes)
+      input_proto = Inputs.get_input_from_bytes("", text_bytes=input_bytes)
     elif input_type == "video":
-      input_proto = Inputs().get_input_from_bytes("", video_bytes=input_bytes)
+      input_proto = Inputs.get_input_from_bytes("", video_bytes=input_bytes)
     elif input_type == "audio":
-      input_proto = Inputs().get_input_from_bytes("", audio_bytes=input_bytes)
+      input_proto = Inputs.get_input_from_bytes("", audio_bytes=input_bytes)

     return self.predict(inputs=[input_proto])

@@ -131,7 +132,7 @@ class Workflow(Lister, BaseClient):

     Example:
         >>> from clarifai.client.workflow import Workflow
-        >>> workflow = Workflow("url_init")
+        >>> workflow = Workflow("url") # Example: https://clarifai.com/clarifai/main/workflows/Face-Sentiment
         or
         >>> workflow = Workflow(user_id='user_id', app_id='app_id', workflow_id='workflow_id')
         >>> workflow_prediction = workflow.predict_by_url('url', 'image')
@@ -140,13 +141,13 @@ class Workflow(Lister, BaseClient):
       raise UserError('Invalid input type it should be image, text, video or audio.')

     if input_type == "image":
-      input_proto = Inputs().get_input_from_url("", image_url=url)
+      input_proto = Inputs.get_input_from_url("", image_url=url)
     elif input_type == "text":
-      input_proto = Inputs().get_input_from_url("", text_url=url)
+      input_proto = Inputs.get_input_from_url("", text_url=url)
     elif input_type == "video":
-      input_proto = Inputs().get_input_from_url("", video_url=url)
+      input_proto = Inputs.get_input_from_url("", video_url=url)
     elif input_type == "audio":
-      input_proto = Inputs().get_input_from_url("", audio_url=url)
+      input_proto = Inputs.get_input_from_url("", audio_url=url)

     return self.predict(inputs=[input_proto])

@@ -187,6 +188,7 @@ class Workflow(Lister, BaseClient):
       yield Workflow(
           workflow_id=self.id,
           base_url=self.base,
+          pat=self.pat,
           **dict(self.kwargs, version=workflow_version_info))

   def export(self, out_path: str):
```
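Taken together, the workflow changes make `url` and `workflow_id` default to `None`, add an explicit `pat` argument that is forwarded to `BaseClient` (and to the `Workflow` objects yielded when listing versions), and call the `Inputs` helpers at class level. A minimal usage sketch of the new constructor; the URL, PAT, and IDs below are placeholders, not values from the diff:

```python
from clarifai.client.workflow import Workflow

# Initialize from a workflow URL, passing the PAT explicitly instead of
# relying on the CLARIFAI_PAT environment variable (placeholder values).
workflow = Workflow(
    url="https://clarifai.com/clarifai/main/workflows/Face-Sentiment",
    pat="YOUR_PAT")

# Or initialize by IDs, as in the updated docstring examples.
workflow = Workflow(
    user_id="user_id", app_id="app_id", workflow_id="workflow_id", pat="YOUR_PAT")
prediction = workflow.predict_by_url("https://samples.clarifai.com/metro-north.jpg", "image")
```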
clarifai/constants/dataset.py
ADDED

```diff
@@ -0,0 +1,22 @@
+DATASET_UPLOAD_TASKS = [
+    "visual_classification", "text_classification", "visual_detection", "visual_segmentation",
+    "visual_captioning"
+]
+
+TASK_TO_ANNOTATION_TYPE = {
+    "visual_classification": {
+        "concepts": "labels"
+    },
+    "text_classification": {
+        "concepts": "labels"
+    },
+    "visual_captioning": {
+        "concepts": "labels"
+    },
+    "visual_detection": {
+        "bboxes": "bboxes"
+    },
+    "visual_segmentation": {
+        "polygons": "polygons"
+    },
+}
```
clarifai/datasets/upload/base.py
CHANGED

```diff
@@ -3,7 +3,7 @@ from typing import Iterator, List, Tuple, TypeVar, Union

 from clarifai_grpc.grpc.api import resources_pb2

-from clarifai.client.input import Inputs
+from clarifai.constants.dataset import DATASET_UPLOAD_TASKS
 from clarifai.datasets.upload.features import (TextFeatures, VisualClassificationFeatures,
                                                VisualDetectionFeatures, VisualSegmentationFeatures)

@@ -16,18 +16,16 @@ OutputFeaturesType = TypeVar(
 class ClarifaiDataset:
   """Clarifai datasets base class."""

-  def __init__(self, datagen_object: 'ClarifaiDataLoader', dataset_id: str, split: str) -> None:
-    self.datagen_object = datagen_object
+  def __init__(self, data_generator: 'ClarifaiDataLoader', dataset_id: str) -> None:
+    self.data_generator = data_generator
     self.dataset_id = dataset_id
-    self.split = split
     self.all_input_ids = {}
     self._all_input_protos = {}
     self._all_annotation_protos = defaultdict(list)
-    self.input_object = Inputs()

   def __len__(self) -> int:
     """Get size of all input protos"""
-    return len(self.datagen_object)
+    return len(self.data_generator)

   def _to_list(self, input_protos: Iterator) -> List:
     """Parse protos iterator to list."""
@@ -53,9 +51,13 @@ class ClarifaiDataset:
 class ClarifaiDataLoader:
   """Clarifai data loader base class."""

-  def __init__(self):
+  def __init__(self) -> None:
     pass

+  @property
+  def task(self):
+    raise NotImplementedError("Task should be one of {}".format(DATASET_UPLOAD_TASKS))
+
   def load_data(self) -> None:
     raise NotImplementedError()

```
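Under the new contract, a loader no longer carries a `split` and must declare its `task`. A minimal custom loader might look like this; the class name and sample data are illustrative, not from the package:

```python
from clarifai.datasets.upload.base import ClarifaiDataLoader
from clarifai.datasets.upload.features import VisualClassificationFeatures

class MyImageLoader(ClarifaiDataLoader):
  """Illustrative loader over a list of (image_path, label) pairs."""

  def __init__(self, samples):
    self.samples = samples  # e.g. [("img1.jpg", "cat"), ("img2.jpg", "dog")]
    self.load_data()

  @property
  def task(self):
    return "visual_classification"  # must be one of DATASET_UPLOAD_TASKS

  def load_data(self):
    pass  # nothing to pre-load in this toy example

  def __len__(self):
    return len(self.samples)

  def __getitem__(self, index):
    image_path, label = self.samples[index]
    return VisualClassificationFeatures(image_path, labels=[label])
```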
clarifai/datasets/upload/features.py
CHANGED

```diff
@@ -16,7 +16,7 @@ class TextFeatures:
 class VisualClassificationFeatures:
   """Image classification datasets preprocessing output features."""
   image_path: str
-  label: Union[str, List[str]]
+  labels: List[Union[str, int]]  # List[str or int] to cater for multi-class tasks
   geo_info: Optional[List[float]] = None  #[Longitude, Latitude]
   id: Optional[int] = None  # image_id
   metadata: Optional[dict] = None
@@ -26,7 +26,7 @@ class VisualClassificationFeatures:
 class VisualDetectionFeatures:
   """Image Detection datasets preprocessing output features."""
   image_path: str
-  classes: List[str]
+  labels: List[Union[str, int]]
   bboxes: List[List[float]]
   geo_info: Optional[List[float]] = None  #[Longitude, Latitude]
   id: Optional[int] = None  # image_id
@@ -37,7 +37,7 @@ class VisualDetectionFeatures:
 class VisualSegmentationFeatures:
   """Image Segmentation datasets preprocessing output features."""
   image_path: str
-  classes: List[str]
+  labels: List[Union[str, int]]
   polygons: List[List[List[float]]]
   geo_info: Optional[List[float]] = None  #[Longitude, Latitude]
   id: Optional[int] = None  # image_id
```
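All three visual feature dataclasses now expose a plural `labels` list in place of the earlier per-task field. For instance, with illustrative values:

```python
from clarifai.datasets.upload.features import VisualDetectionFeatures

item = VisualDetectionFeatures(
    image_path="images/000000397133.jpg",  # illustrative path
    labels=["person", "dog"],
    bboxes=[[0.1, 0.2, 0.5, 0.9], [0.4, 0.4, 0.8, 0.8]],  # [xmin, ymin, xmax, ymax] per box
    id=397133)
```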
clarifai/datasets/upload/image.py
CHANGED

```diff
@@ -1,17 +1,18 @@
 import os
 from concurrent.futures import ThreadPoolExecutor
-from typing import Iterator, List, Tuple
+from typing import List, Tuple, Type

 from clarifai_grpc.grpc.api import resources_pb2
 from google.protobuf.struct_pb2 import Struct

-from .base import ClarifaiDataset
+from clarifai.client.input import Inputs
+from clarifai.datasets.upload.base import ClarifaiDataLoader, ClarifaiDataset


 class VisualClassificationDataset(ClarifaiDataset):

-  def __init__(self, datagen_object: Iterator, dataset_id: str, split: str) -> None:
-    super().__init__(datagen_object, dataset_id, split)
+  def __init__(self, data_generator: Type[ClarifaiDataLoader], dataset_id: str) -> None:
+    super().__init__(data_generator, dataset_id)

   def _extract_protos(self, batch_input_ids: List[str]
                      ) -> Tuple[List[resources_pb2.Input], List[resources_pb2.Annotation]]:
@@ -24,31 +25,31 @@ class VisualClassificationDataset(ClarifaiDataset):
     """
     input_protos, annotation_protos = [], []

-    def loop(id):
-      datagen_item = self.datagen_object[id]
+    def process_data_item(id):
+      data_item = self.data_generator[id]
       metadata = Struct()
-      image_path = datagen_item.image_path
-      label = datagen_item.label if isinstance(datagen_item.label,
-                                               list) else [datagen_item.label]  # clarifai concept
-      input_id = f"{self.dataset_id}-{self.split}-{id}" if datagen_item.id is None else f"{self.dataset_id}-{self.split}-{str(datagen_item.id)}"
-      geo_info = datagen_item.geo_info
-      if datagen_item.metadata is not None:
-        metadata.update(datagen_item.metadata)
+      image_path = data_item.image_path
+      labels = data_item.labels if isinstance(data_item.labels,
+                                              list) else [data_item.labels]  # clarifai concept
+      input_id = f"{self.dataset_id}-{id}" if data_item.id is None else f"{self.dataset_id}-{str(data_item.id)}"
+      geo_info = data_item.geo_info
+      if data_item.metadata is not None:
+        metadata.update(data_item.metadata)
       else:
-        metadata.update({"filename": os.path.basename(image_path), "split": self.split})
+        metadata.update({"filename": os.path.basename(image_path)})

       self.all_input_ids[id] = input_id
       input_protos.append(
-          self.input_object.get_input_from_file(
+          Inputs.get_input_from_file(
               input_id=input_id,
               image_file=image_path,
               dataset_id=self.dataset_id,
-              labels=label,
+              labels=labels,
               geo_info=geo_info,
               metadata=metadata))

     with ThreadPoolExecutor(max_workers=4) as executor:
-      futures = [executor.submit(loop, id) for id in batch_input_ids]
+      futures = [executor.submit(process_data_item, id) for id in batch_input_ids]
       for job in futures:
         job.result()

@@ -58,8 +59,8 @@ class VisualClassificationDataset(ClarifaiDataset):
 class VisualDetectionDataset(ClarifaiDataset):
   """Visual detection dataset proto class."""

-  def __init__(self, datagen_object: Iterator, dataset_id: str, split: str) -> None:
-    super().__init__(datagen_object, dataset_id, split)
+  def __init__(self, data_generator: Type[ClarifaiDataLoader], dataset_id: str) -> None:
+    super().__init__(data_generator, dataset_id)

   def _extract_protos(self, batch_input_ids: List[int]
                      ) -> Tuple[List[resources_pb2.Input], List[resources_pb2.Annotation]]:
@@ -72,36 +73,35 @@ class VisualDetectionDataset(ClarifaiDataset):
     """
     input_protos, annotation_protos = [], []

-    def loop(id):
-      datagen_item = self.datagen_object[id]
+    def process_data_item(id):
+      data_item = self.data_generator[id]
       metadata = Struct()
-      image = datagen_item.image_path
-      labels = datagen_item.classes  # list:[l1,...,ln]
-      bboxes = datagen_item.bboxes  # [[xmin,ymin,xmax,ymax],...,[xmin,ymin,xmax,ymax]]
-      input_id = f"{self.dataset_id}-{self.split}-{id}" if datagen_item.id is None else f"{self.dataset_id}-{self.split}-{str(datagen_item.id)}"
-      if datagen_item.metadata is not None:
-        metadata.update(datagen_item.metadata)
+      image = data_item.image_path
+      labels = data_item.labels  # list:[l1,...,ln]
+      bboxes = data_item.bboxes  # [[xmin,ymin,xmax,ymax],...,[xmin,ymin,xmax,ymax]]
+      input_id = f"{self.dataset_id}-{id}" if data_item.id is None else f"{self.dataset_id}-{str(data_item.id)}"
+      if data_item.metadata is not None:
+        metadata.update(data_item.metadata)
       else:
-        metadata.update({"filename": os.path.basename(image), "split": self.split})
-      geo_info = datagen_item.geo_info
+        metadata.update({"filename": os.path.basename(image)})
+      geo_info = data_item.geo_info

       self.all_input_ids[id] = input_id
       input_protos.append(
-          self.input_object.get_input_from_file(
+          Inputs.get_input_from_file(
               input_id=input_id,
               image_file=image,
               dataset_id=self.dataset_id,
               geo_info=geo_info,
               metadata=metadata))
-      # iter over bboxes and classes
+      # iter over bboxes and labels
       # one id could have more than one bbox and label
       for i in range(len(bboxes)):
         annotation_protos.append(
-            self.input_object.get_annotation_proto(
-                input_id=input_id, label=labels[i], annotations=bboxes[i]))
+            Inputs.get_annotation_proto(input_id=input_id, label=labels[i], annotations=bboxes[i]))

     with ThreadPoolExecutor(max_workers=4) as executor:
-      futures = [executor.submit(loop, id) for id in batch_input_ids]
+      futures = [executor.submit(process_data_item, id) for id in batch_input_ids]
       for job in futures:
         job.result()

@@ -111,8 +111,8 @@ class VisualDetectionDataset(ClarifaiDataset):
 class VisualSegmentationDataset(ClarifaiDataset):
   """Visual segmentation dataset proto class."""

-  def __init__(self, datagen_object: Iterator, dataset_id: str, split: str) -> None:
-    super().__init__(datagen_object, dataset_id, split)
+  def __init__(self, data_generator: Type[ClarifaiDataLoader], dataset_id: str) -> None:
+    super().__init__(data_generator, dataset_id)

   def _extract_protos(self, batch_input_ids: List[str]
                      ) -> Tuple[List[resources_pb2.Input], List[resources_pb2.Annotation]]:
@@ -125,22 +125,22 @@ class VisualSegmentationDataset(ClarifaiDataset):
     """
     input_protos, annotation_protos = [], []

-    def loop(id):
-      datagen_item = self.datagen_object[id]
+    def process_data_item(id):
+      data_item = self.data_generator[id]
       metadata = Struct()
-      image = datagen_item.image_path
-      labels = datagen_item.classes
-      _polygons = datagen_item.polygons  # list of polygons: [[[x,y],...,[x,y]],...]
-      input_id = f"{self.dataset_id}-{self.split}-{id}" if datagen_item.id is None else f"{self.dataset_id}-{self.split}-{str(datagen_item.id)}"
-      if datagen_item.metadata is not None:
-        metadata.update(datagen_item.metadata)
+      image = data_item.image_path
+      labels = data_item.labels
+      _polygons = data_item.polygons  # list of polygons: [[[x,y],...,[x,y]],...]
+      input_id = f"{self.dataset_id}-{id}" if data_item.id is None else f"{self.dataset_id}-{str(data_item.id)}"
+      if data_item.metadata is not None:
+        metadata.update(data_item.metadata)
       else:
-        metadata.update({"filename": os.path.basename(image), "split": self.split})
-      geo_info = datagen_item.geo_info
+        metadata.update({"filename": os.path.basename(image)})
+      geo_info = data_item.geo_info

       self.all_input_ids[id] = input_id
       input_protos.append(
-          self.input_object.get_input_from_file(
+          Inputs.get_input_from_file(
               input_id=input_id,
               image_file=image,
               dataset_id=self.dataset_id,
@@ -152,13 +152,12 @@ class VisualSegmentationDataset(ClarifaiDataset):
       for i, _polygon in enumerate(_polygons):
         try:
           annotation_protos.append(
-              self.input_object.get_mask_proto(
-                  input_id=input_id, label=labels[i], polygons=_polygon))
+              Inputs.get_mask_proto(input_id=input_id, label=labels[i], polygons=_polygon))
         except IndexError:
           continue

     with ThreadPoolExecutor(max_workers=4) as executor:
-      futures = [executor.submit(loop, id) for id in batch_input_ids]
+      futures = [executor.submit(process_data_item, id) for id in batch_input_ids]
       for job in futures:
         job.result()

```
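The dataset proto classes now take just a loader and a dataset ID, so constructing one under the new API reduces to the following sketch, reusing the illustrative `MyImageLoader` from the base.py section above:

```python
from clarifai.datasets.upload.image import VisualClassificationDataset

loader = MyImageLoader([("img1.jpg", "cat"), ("img2.jpg", "dog")])
dataset = VisualClassificationDataset(data_generator=loader, dataset_id="my-dataset")
print(len(dataset))  # delegates to len(loader); generated input IDs are "my-dataset-<index>"
```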
clarifai/datasets/upload/loaders/coco_captions.py
CHANGED

```diff
@@ -1,12 +1,8 @@
-#! COCO 2017 image captioning dataset
+#! COCO image captioning dataset

 import os
-import zipfile
-from glob import glob

-import requests
 from pycocotools.coco import COCO
-from tqdm import tqdm

 from clarifai.datasets.upload.base import ClarifaiDataLoader

@@ -14,90 +10,40 @@ from ..features import VisualClassificationFeatures


 class COCOCaptionsDataLoader(ClarifaiDataLoader):
-  """COCO 2017 Image Captioning Dataset."""
+  """COCO Image Captioning Dataset."""

-  def __init__(self, split: str = "train"):
-    """
+  def __init__(self, images_dir, label_filepath):
+    """
     Args:
-      filenames: dict of the coco zip archive names to download,
-      data_dir: the local coco dataset directory
-      split: "train" or "val"
+      images_dir: Directory containing the images.
+      label_filepath: Path to the COCO annotation file.
     """
-    self.filenames = {
-        "train": "train2017.zip",
-        "val": "val2017.zip",
-        "annotations": "annotations_trainval2017.zip"
-    }
-    self.split = split
-    self.url = "http://images.cocodataset.org/zips/"  # coco base image-zip url
-    self.data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
-                                 "data")  # data storage directory
-    self.extracted_coco_dirs = {"train": None, "val": None, "annotations": None}
+    self.images_dir = images_dir
+    self.label_filepath = label_filepath

+    self.map_ids = {}
     self.load_data()

-  def coco_download(self, save_dir):
-    """Download coco dataset."""
-    #check if the data dir exists
-    if not os.path.exists(save_dir):
-      os.mkdir(save_dir)
-
-    #check if train, val and annotation dirs exist
-    #so that the coco2017 data isn't downloaded
-    for key, filename in self.filenames.items():
-      existing_files = glob(f"{save_dir}/{key}*")
-      if existing_files:
-        print(f"{key} dataset already downloded and extracted")
-        continue
-
-      print("-" * 80)
-      print(f"Downloading {filename}")
-      print("-" * 80)
-
-      if "annotations" in filename:
-        self.url = "http://images.cocodataset.org/annotations/"
+  @property
+  def task(self):
+    return "visual_captioning"

-      response = requests.get(self.url + filename, stream=True)
-      with open(os.path.join(save_dir, filename), "wb") as _file:
-        for chunk in tqdm(response.iter_content(chunk_size=5124000)):
-          if chunk:
-            _file.write(chunk)
-      print("Data download complete...")
+  def load_data(self) -> None:
+    self.coco = COCO(self.label_filepath)
+    self.map_ids = {i: img_id for i, img_id in enumerate(list(self.coco.imgs.keys()))}

-      #extract files
-      with zipfile.ZipFile(os.path.join(save_dir, filename)) as zf:
-        print(f" Extracting {filename} file")
-        zf.extractall(path=save_dir)
-        # Delete coco zip
-        print(f" Deleting {filename}")
-        os.remove(path=os.path.join(save_dir, filename))
-
-  def load_data(self):
-    if isinstance(self.filenames, dict) and len(self.filenames) == 3:
-      self.coco_download(self.data_dir)
-      self.extracted_coco_dirs["train"] = [os.path.join(self.data_dir, i) \
-      for i in os.listdir(self.data_dir) if "train" in i][0]
-      self.extracted_coco_dirs["val"] = [os.path.join(self.data_dir, i) \
-      for i in os.listdir(self.data_dir) if "val" in i][0]
-
-      self.extracted_coco_dirs["annotations"] = [os.path.join(self.data_dir, i) \
-      for i in os.listdir(self.data_dir) if "annotations" in i][0]
-    else:
-      raise Exception(f"`filenames` must be a dict of atleast 2 coco zip file names; \
-      train, val and annotations. Found {len(self.filenames)} items instead.")
+  def __len__(self):
+    return len(self.coco.imgs)

-  def __getitem__(self, index: int):
-    """Get image captions."""
-    annotations_file = glob(
-        os.path.join(self.extracted_coco_dirs["annotations"], f"captions_{self.split}*"))[0]
-
-    self.coco = COCO(annotations_file)
-    annots = list(self.coco.anns.values())
-
-    annot = annots[index]
-    # image path from the image id
-    image_path = glob(
-        os.path.join(self.extracted_coco_dirs[self.split],
-                     f"{str(annot['image_id']).zfill(12)}*"))[0]
+  def __getitem__(self, index):
+    value = self.coco.imgs[self.map_ids[index]]
+    image_path = os.path.join(self.images_dir, value['file_name'])
+    annots = []

-    return VisualClassificationFeatures(image_path, annot['caption'], id=annot['image_id'])
+    input_ann_ids = self.coco.getAnnIds(imgIds=[value['id']])
+    input_anns = self.coco.loadAnns(input_ann_ids)

+    for ann in input_anns:
+      annots.append(ann['caption'])

+    return VisualClassificationFeatures(image_path, labels=annots[0], id=str(value['id']))
```