clarifai 9.7.0__py3-none-any.whl → 9.7.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clarifai/auth/__init__.py +6 -0
- clarifai/auth/helper.py +35 -36
- clarifai/auth/register.py +23 -0
- clarifai/{client → auth}/stub.py +10 -10
- clarifai/client/__init__.py +1 -4
- clarifai/client/app.py +483 -0
- clarifai/client/auth/__init__.py +4 -0
- clarifai/client/{abc.py → auth/abc.py} +2 -2
- clarifai/client/auth/helper.py +377 -0
- clarifai/client/auth/register.py +23 -0
- {clarifai_utils/client → clarifai/client/auth}/stub.py +10 -10
- clarifai/client/base.py +112 -0
- clarifai/client/dataset.py +290 -0
- clarifai/client/input.py +730 -0
- clarifai/client/lister.py +41 -0
- clarifai/client/model.py +218 -0
- clarifai/client/module.py +82 -0
- clarifai/client/user.py +125 -0
- clarifai/client/workflow.py +194 -0
- clarifai/datasets/upload/base.py +66 -0
- clarifai/datasets/upload/examples/README.md +31 -0
- clarifai/datasets/upload/examples/image_classification/cifar10/dataset.py +42 -0
- clarifai/datasets/upload/examples/image_classification/food-101/dataset.py +39 -0
- clarifai/datasets/upload/examples/text_classification/imdb_dataset/dataset.py +37 -0
- clarifai/{data_upload/datasets → datasets/upload}/features.py +4 -12
- clarifai/datasets/upload/image.py +156 -0
- clarifai/datasets/upload/loaders/README.md +49 -0
- clarifai/{data_upload/datasets/zoo → datasets/upload/loaders}/coco_captions.py +24 -21
- {clarifai_utils/data_upload/datasets/zoo → clarifai/datasets/upload/loaders}/coco_detection.py +46 -42
- clarifai/datasets/upload/loaders/coco_segmentation.py +166 -0
- clarifai/{data_upload/datasets/zoo → datasets/upload/loaders}/imagenet_classification.py +22 -12
- clarifai/{data_upload/datasets/zoo → datasets/upload/loaders}/xview_detection.py +44 -53
- clarifai/datasets/upload/text.py +50 -0
- clarifai/datasets/upload/utils.py +62 -0
- clarifai/errors.py +90 -0
- clarifai/urls/helper.py +16 -17
- clarifai/utils/logging.py +40 -0
- clarifai/utils/misc.py +33 -0
- clarifai/versions.py +6 -0
- {clarifai-9.7.0.dist-info → clarifai-9.7.2.dist-info}/LICENSE +1 -1
- clarifai-9.7.2.dist-info/METADATA +179 -0
- clarifai-9.7.2.dist-info/RECORD +350 -0
- clarifai_utils/auth/__init__.py +6 -0
- clarifai_utils/auth/helper.py +35 -36
- clarifai_utils/auth/register.py +23 -0
- clarifai_utils/auth/stub.py +127 -0
- clarifai_utils/client/__init__.py +1 -4
- clarifai_utils/client/app.py +483 -0
- clarifai_utils/client/auth/__init__.py +4 -0
- clarifai_utils/client/{abc.py → auth/abc.py} +2 -2
- clarifai_utils/client/auth/helper.py +377 -0
- clarifai_utils/client/auth/register.py +23 -0
- clarifai_utils/client/auth/stub.py +127 -0
- clarifai_utils/client/base.py +112 -0
- clarifai_utils/client/dataset.py +290 -0
- clarifai_utils/client/input.py +730 -0
- clarifai_utils/client/lister.py +41 -0
- clarifai_utils/client/model.py +218 -0
- clarifai_utils/client/module.py +82 -0
- clarifai_utils/client/user.py +125 -0
- clarifai_utils/client/workflow.py +194 -0
- clarifai_utils/datasets/upload/base.py +66 -0
- clarifai_utils/datasets/upload/examples/README.md +31 -0
- clarifai_utils/datasets/upload/examples/image_classification/cifar10/dataset.py +42 -0
- clarifai_utils/datasets/upload/examples/image_classification/food-101/dataset.py +39 -0
- clarifai_utils/datasets/upload/examples/text_classification/imdb_dataset/dataset.py +37 -0
- clarifai_utils/{data_upload/datasets → datasets/upload}/features.py +4 -12
- clarifai_utils/datasets/upload/image.py +156 -0
- clarifai_utils/datasets/upload/loaders/README.md +49 -0
- clarifai_utils/{data_upload/datasets/zoo → datasets/upload/loaders}/coco_captions.py +24 -21
- {clarifai/data_upload/datasets/zoo → clarifai_utils/datasets/upload/loaders}/coco_detection.py +46 -42
- clarifai_utils/datasets/upload/loaders/coco_segmentation.py +166 -0
- clarifai_utils/{data_upload/datasets/zoo → datasets/upload/loaders}/imagenet_classification.py +22 -12
- clarifai_utils/{data_upload/datasets/zoo → datasets/upload/loaders}/xview_detection.py +44 -53
- clarifai_utils/datasets/upload/text.py +50 -0
- clarifai_utils/datasets/upload/utils.py +62 -0
- clarifai_utils/errors.py +90 -0
- clarifai_utils/urls/helper.py +16 -17
- clarifai_utils/utils/logging.py +40 -0
- clarifai_utils/utils/misc.py +33 -0
- clarifai_utils/versions.py +6 -0
- clarifai/data_upload/README.md +0 -63
- clarifai/data_upload/convert_csv.py +0 -182
- clarifai/data_upload/datasets/base.py +0 -87
- clarifai/data_upload/datasets/image.py +0 -253
- clarifai/data_upload/datasets/text.py +0 -60
- clarifai/data_upload/datasets/zoo/README.md +0 -55
- clarifai/data_upload/datasets/zoo/coco_segmentation.py +0 -160
- clarifai/data_upload/examples/README.md +0 -5
- clarifai/data_upload/examples/image_classification/cifar10/dataset.py +0 -40
- clarifai/data_upload/examples/image_classification/food-101/dataset.py +0 -39
- clarifai/data_upload/examples/image_classification/food-101/images/beignets/1036242.jpg +0 -0
- clarifai/data_upload/examples/image_classification/food-101/images/beignets/1114182.jpg +0 -0
- clarifai/data_upload/examples/image_classification/food-101/images/beignets/2012944.jpg +0 -0
- clarifai/data_upload/examples/image_classification/food-101/images/beignets/2464389.jpg +0 -0
- clarifai/data_upload/examples/image_classification/food-101/images/beignets/478632.jpg +0 -0
- clarifai/data_upload/examples/image_classification/food-101/images/hamburger/1061270.jpg +0 -0
- clarifai/data_upload/examples/image_classification/food-101/images/hamburger/1202261.jpg +0 -0
- clarifai/data_upload/examples/image_classification/food-101/images/hamburger/1381751.jpg +0 -0
- clarifai/data_upload/examples/image_classification/food-101/images/hamburger/3289634.jpg +0 -0
- clarifai/data_upload/examples/image_classification/food-101/images/hamburger/862025.jpg +0 -0
- clarifai/data_upload/examples/image_classification/food-101/images/prime_rib/102197.jpg +0 -0
- clarifai/data_upload/examples/image_classification/food-101/images/prime_rib/2749372.jpg +0 -0
- clarifai/data_upload/examples/image_classification/food-101/images/prime_rib/2938268.jpg +0 -0
- clarifai/data_upload/examples/image_classification/food-101/images/prime_rib/3590861.jpg +0 -0
- clarifai/data_upload/examples/image_classification/food-101/images/prime_rib/746716.jpg +0 -0
- clarifai/data_upload/examples/image_classification/food-101/images/ramen/2955110.jpg +0 -0
- clarifai/data_upload/examples/image_classification/food-101/images/ramen/3208966.jpg +0 -0
- clarifai/data_upload/examples/image_classification/food-101/images/ramen/3270629.jpg +0 -0
- clarifai/data_upload/examples/image_classification/food-101/images/ramen/3424562.jpg +0 -0
- clarifai/data_upload/examples/image_classification/food-101/images/ramen/544680.jpg +0 -0
- clarifai/data_upload/examples/image_detection/voc/annotations/2007_000464.xml +0 -39
- clarifai/data_upload/examples/image_detection/voc/annotations/2008_000853.xml +0 -28
- clarifai/data_upload/examples/image_detection/voc/annotations/2008_003182.xml +0 -54
- clarifai/data_upload/examples/image_detection/voc/annotations/2008_008526.xml +0 -67
- clarifai/data_upload/examples/image_detection/voc/annotations/2009_004315.xml +0 -28
- clarifai/data_upload/examples/image_detection/voc/annotations/2009_004382.xml +0 -28
- clarifai/data_upload/examples/image_detection/voc/annotations/2011_000430.xml +0 -28
- clarifai/data_upload/examples/image_detection/voc/annotations/2011_001610.xml +0 -46
- clarifai/data_upload/examples/image_detection/voc/annotations/2011_006412.xml +0 -99
- clarifai/data_upload/examples/image_detection/voc/annotations/2012_000690.xml +0 -43
- clarifai/data_upload/examples/image_detection/voc/dataset.py +0 -76
- clarifai/data_upload/examples/image_detection/voc/images/2007_000464.jpg +0 -0
- clarifai/data_upload/examples/image_detection/voc/images/2008_000853.jpg +0 -0
- clarifai/data_upload/examples/image_detection/voc/images/2008_003182.jpg +0 -0
- clarifai/data_upload/examples/image_detection/voc/images/2008_008526.jpg +0 -0
- clarifai/data_upload/examples/image_detection/voc/images/2009_004315.jpg +0 -0
- clarifai/data_upload/examples/image_detection/voc/images/2009_004382.jpg +0 -0
- clarifai/data_upload/examples/image_detection/voc/images/2011_000430.jpg +0 -0
- clarifai/data_upload/examples/image_detection/voc/images/2011_001610.jpg +0 -0
- clarifai/data_upload/examples/image_detection/voc/images/2011_006412.jpg +0 -0
- clarifai/data_upload/examples/image_detection/voc/images/2012_000690.jpg +0 -0
- clarifai/data_upload/examples/image_segmentation/coco/annotations/instances_val2017_subset.json +0 -5342
- clarifai/data_upload/examples/image_segmentation/coco/dataset.py +0 -107
- clarifai/data_upload/examples/image_segmentation/coco/images/000000074646.jpg +0 -0
- clarifai/data_upload/examples/image_segmentation/coco/images/000000086956.jpg +0 -0
- clarifai/data_upload/examples/image_segmentation/coco/images/000000166563.jpg +0 -0
- clarifai/data_upload/examples/image_segmentation/coco/images/000000176857.jpg +0 -0
- clarifai/data_upload/examples/image_segmentation/coco/images/000000182202.jpg +0 -0
- clarifai/data_upload/examples/image_segmentation/coco/images/000000193245.jpg +0 -0
- clarifai/data_upload/examples/image_segmentation/coco/images/000000384850.jpg +0 -0
- clarifai/data_upload/examples/image_segmentation/coco/images/000000409630.jpg +0 -0
- clarifai/data_upload/examples/image_segmentation/coco/images/000000424349.jpg +0 -0
- clarifai/data_upload/examples/image_segmentation/coco/images/000000573008.jpg +0 -0
- clarifai/data_upload/examples/text_classification/imdb_dataset/dataset.py +0 -40
- clarifai/data_upload/examples.py +0 -17
- clarifai/data_upload/upload.py +0 -356
- clarifai/dataset_export/dataset_export_inputs.py +0 -205
- clarifai/listing/concepts.py +0 -37
- clarifai/listing/datasets.py +0 -37
- clarifai/listing/inputs.py +0 -111
- clarifai/listing/installed_module_versions.py +0 -40
- clarifai/listing/lister.py +0 -200
- clarifai/listing/models.py +0 -46
- clarifai/listing/module_versions.py +0 -42
- clarifai/listing/modules.py +0 -36
- clarifai/runners/base.py +0 -140
- clarifai/runners/example.py +0 -36
- clarifai-9.7.0.dist-info/METADATA +0 -99
- clarifai-9.7.0.dist-info/RECORD +0 -456
- clarifai_utils/data_upload/README.md +0 -63
- clarifai_utils/data_upload/convert_csv.py +0 -182
- clarifai_utils/data_upload/datasets/base.py +0 -87
- clarifai_utils/data_upload/datasets/image.py +0 -253
- clarifai_utils/data_upload/datasets/text.py +0 -60
- clarifai_utils/data_upload/datasets/zoo/README.md +0 -55
- clarifai_utils/data_upload/datasets/zoo/coco_segmentation.py +0 -160
- clarifai_utils/data_upload/examples/README.md +0 -5
- clarifai_utils/data_upload/examples/image_classification/cifar10/dataset.py +0 -40
- clarifai_utils/data_upload/examples/image_classification/food-101/dataset.py +0 -39
- clarifai_utils/data_upload/examples/image_classification/food-101/images/beignets/1036242.jpg +0 -0
- clarifai_utils/data_upload/examples/image_classification/food-101/images/beignets/1114182.jpg +0 -0
- clarifai_utils/data_upload/examples/image_classification/food-101/images/beignets/2012944.jpg +0 -0
- clarifai_utils/data_upload/examples/image_classification/food-101/images/beignets/2464389.jpg +0 -0
- clarifai_utils/data_upload/examples/image_classification/food-101/images/beignets/478632.jpg +0 -0
- clarifai_utils/data_upload/examples/image_classification/food-101/images/hamburger/1061270.jpg +0 -0
- clarifai_utils/data_upload/examples/image_classification/food-101/images/hamburger/1202261.jpg +0 -0
- clarifai_utils/data_upload/examples/image_classification/food-101/images/hamburger/1381751.jpg +0 -0
- clarifai_utils/data_upload/examples/image_classification/food-101/images/hamburger/3289634.jpg +0 -0
- clarifai_utils/data_upload/examples/image_classification/food-101/images/hamburger/862025.jpg +0 -0
- clarifai_utils/data_upload/examples/image_classification/food-101/images/prime_rib/102197.jpg +0 -0
- clarifai_utils/data_upload/examples/image_classification/food-101/images/prime_rib/2749372.jpg +0 -0
- clarifai_utils/data_upload/examples/image_classification/food-101/images/prime_rib/2938268.jpg +0 -0
- clarifai_utils/data_upload/examples/image_classification/food-101/images/prime_rib/3590861.jpg +0 -0
- clarifai_utils/data_upload/examples/image_classification/food-101/images/prime_rib/746716.jpg +0 -0
- clarifai_utils/data_upload/examples/image_classification/food-101/images/ramen/2955110.jpg +0 -0
- clarifai_utils/data_upload/examples/image_classification/food-101/images/ramen/3208966.jpg +0 -0
- clarifai_utils/data_upload/examples/image_classification/food-101/images/ramen/3270629.jpg +0 -0
- clarifai_utils/data_upload/examples/image_classification/food-101/images/ramen/3424562.jpg +0 -0
- clarifai_utils/data_upload/examples/image_classification/food-101/images/ramen/544680.jpg +0 -0
- clarifai_utils/data_upload/examples/image_detection/__init__.py +0 -0
- clarifai_utils/data_upload/examples/image_detection/voc/__init__.py +0 -0
- clarifai_utils/data_upload/examples/image_detection/voc/annotations/2007_000464.xml +0 -39
- clarifai_utils/data_upload/examples/image_detection/voc/annotations/2008_000853.xml +0 -28
- clarifai_utils/data_upload/examples/image_detection/voc/annotations/2008_003182.xml +0 -54
- clarifai_utils/data_upload/examples/image_detection/voc/annotations/2008_008526.xml +0 -67
- clarifai_utils/data_upload/examples/image_detection/voc/annotations/2009_004315.xml +0 -28
- clarifai_utils/data_upload/examples/image_detection/voc/annotations/2009_004382.xml +0 -28
- clarifai_utils/data_upload/examples/image_detection/voc/annotations/2011_000430.xml +0 -28
- clarifai_utils/data_upload/examples/image_detection/voc/annotations/2011_001610.xml +0 -46
- clarifai_utils/data_upload/examples/image_detection/voc/annotations/2011_006412.xml +0 -99
- clarifai_utils/data_upload/examples/image_detection/voc/annotations/2012_000690.xml +0 -43
- clarifai_utils/data_upload/examples/image_detection/voc/dataset.py +0 -76
- clarifai_utils/data_upload/examples/image_detection/voc/images/2007_000464.jpg +0 -0
- clarifai_utils/data_upload/examples/image_detection/voc/images/2008_000853.jpg +0 -0
- clarifai_utils/data_upload/examples/image_detection/voc/images/2008_003182.jpg +0 -0
- clarifai_utils/data_upload/examples/image_detection/voc/images/2008_008526.jpg +0 -0
- clarifai_utils/data_upload/examples/image_detection/voc/images/2009_004315.jpg +0 -0
- clarifai_utils/data_upload/examples/image_detection/voc/images/2009_004382.jpg +0 -0
- clarifai_utils/data_upload/examples/image_detection/voc/images/2011_000430.jpg +0 -0
- clarifai_utils/data_upload/examples/image_detection/voc/images/2011_001610.jpg +0 -0
- clarifai_utils/data_upload/examples/image_detection/voc/images/2011_006412.jpg +0 -0
- clarifai_utils/data_upload/examples/image_detection/voc/images/2012_000690.jpg +0 -0
- clarifai_utils/data_upload/examples/image_segmentation/__init__.py +0 -0
- clarifai_utils/data_upload/examples/image_segmentation/coco/__init__.py +0 -0
- clarifai_utils/data_upload/examples/image_segmentation/coco/annotations/instances_val2017_subset.json +0 -5342
- clarifai_utils/data_upload/examples/image_segmentation/coco/dataset.py +0 -107
- clarifai_utils/data_upload/examples/image_segmentation/coco/images/000000074646.jpg +0 -0
- clarifai_utils/data_upload/examples/image_segmentation/coco/images/000000086956.jpg +0 -0
- clarifai_utils/data_upload/examples/image_segmentation/coco/images/000000166563.jpg +0 -0
- clarifai_utils/data_upload/examples/image_segmentation/coco/images/000000176857.jpg +0 -0
- clarifai_utils/data_upload/examples/image_segmentation/coco/images/000000182202.jpg +0 -0
- clarifai_utils/data_upload/examples/image_segmentation/coco/images/000000193245.jpg +0 -0
- clarifai_utils/data_upload/examples/image_segmentation/coco/images/000000384850.jpg +0 -0
- clarifai_utils/data_upload/examples/image_segmentation/coco/images/000000409630.jpg +0 -0
- clarifai_utils/data_upload/examples/image_segmentation/coco/images/000000424349.jpg +0 -0
- clarifai_utils/data_upload/examples/image_segmentation/coco/images/000000573008.jpg +0 -0
- clarifai_utils/data_upload/examples/text_classification/__init__.py +0 -0
- clarifai_utils/data_upload/examples/text_classification/imdb_dataset/__init__.py +0 -0
- clarifai_utils/data_upload/examples/text_classification/imdb_dataset/dataset.py +0 -40
- clarifai_utils/data_upload/examples.py +0 -17
- clarifai_utils/data_upload/upload.py +0 -356
- clarifai_utils/dataset_export/dataset_export_inputs.py +0 -205
- clarifai_utils/listing/__init__.py +0 -0
- clarifai_utils/listing/concepts.py +0 -37
- clarifai_utils/listing/datasets.py +0 -37
- clarifai_utils/listing/inputs.py +0 -111
- clarifai_utils/listing/installed_module_versions.py +0 -40
- clarifai_utils/listing/lister.py +0 -200
- clarifai_utils/listing/models.py +0 -46
- clarifai_utils/listing/module_versions.py +0 -42
- clarifai_utils/listing/modules.py +0 -36
- clarifai_utils/runners/__init__.py +0 -0
- clarifai_utils/runners/base.py +0 -140
- clarifai_utils/runners/example.py +0 -36
- /clarifai/{data_upload/__init__.py → cli.py} +0 -0
- /clarifai/{data_upload/datasets → datasets}/__init__.py +0 -0
- /clarifai/{data_upload/datasets/zoo → datasets/upload}/__init__.py +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/__init__.py +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/cifar10/__init__.py +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/cifar10/cifar_small_test.csv +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/cifar10/cifar_small_train.csv +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/cifar10/images/test_batch_700.jpg +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/cifar10/images/test_batch_701.jpg +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/cifar10/images/test_batch_702.jpg +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/cifar10/images/test_batch_703.jpg +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/cifar10/images/test_batch_704.jpg +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/cifar10/images/test_batch_705.jpg +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/cifar10/images/test_batch_706.jpg +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/cifar10/images/test_batch_707.jpg +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/cifar10/images/test_batch_708.jpg +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/cifar10/images/test_batch_709.jpg +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/food-101/__init__.py +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/food-101/images/beignets/1420783.jpg +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/food-101/images/beignets/3287885.jpg +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/food-101/images/beignets/3617075.jpg +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/food-101/images/beignets/38052.jpg +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/food-101/images/beignets/39147.jpg +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/food-101/images/hamburger/139558.jpg +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/food-101/images/hamburger/1636096.jpg +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/food-101/images/hamburger/2480925.jpg +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/food-101/images/hamburger/3385808.jpg +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/food-101/images/hamburger/3647386.jpg +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/food-101/images/prime_rib/1826869.jpg +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/food-101/images/prime_rib/2243245.jpg +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/food-101/images/prime_rib/259212.jpg +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/food-101/images/prime_rib/2842688.jpg +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/food-101/images/prime_rib/3035414.jpg +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/food-101/images/ramen/1545393.jpg +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/food-101/images/ramen/2427642.jpg +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/food-101/images/ramen/3520891.jpg +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/food-101/images/ramen/377566.jpg +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/image_classification/food-101/images/ramen/503504.jpg +0 -0
- /clarifai/{data_upload/examples/image_detection → datasets/upload/examples/text_classification}/__init__.py +0 -0
- /clarifai/{data_upload/examples/image_detection/voc → datasets/upload/examples/text_classification/imdb_dataset}/__init__.py +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/text_classification/imdb_dataset/test.csv +0 -0
- /clarifai/{data_upload → datasets/upload}/examples/text_classification/imdb_dataset/train.csv +0 -0
- /clarifai/{data_upload/examples/image_segmentation → datasets/upload/loaders}/__init__.py +0 -0
- /clarifai/{data_upload/examples/image_segmentation/coco → utils}/__init__.py +0 -0
- {clarifai-9.7.0.dist-info → clarifai-9.7.2.dist-info}/WHEEL +0 -0
- {clarifai-9.7.0.dist-info → clarifai-9.7.2.dist-info}/entry_points.txt +0 -0
- {clarifai-9.7.0.dist-info → clarifai-9.7.2.dist-info}/top_level.txt +0 -0
- /clarifai/data_upload/examples/text_classification/__init__.py → /clarifai_utils/cli.py +0 -0
- {clarifai/data_upload/examples/text_classification/imdb_dataset → clarifai_utils/datasets}/__init__.py +0 -0
- {clarifai/listing → clarifai_utils/datasets/upload}/__init__.py +0 -0
- {clarifai/runners → clarifai_utils/datasets/upload/examples/image_classification}/__init__.py +0 -0
- /clarifai_utils/{data_upload → datasets/upload/examples/image_classification/cifar10}/__init__.py +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/cifar10/cifar_small_test.csv +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/cifar10/cifar_small_train.csv +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/cifar10/images/test_batch_700.jpg +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/cifar10/images/test_batch_701.jpg +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/cifar10/images/test_batch_702.jpg +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/cifar10/images/test_batch_703.jpg +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/cifar10/images/test_batch_704.jpg +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/cifar10/images/test_batch_705.jpg +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/cifar10/images/test_batch_706.jpg +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/cifar10/images/test_batch_707.jpg +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/cifar10/images/test_batch_708.jpg +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/cifar10/images/test_batch_709.jpg +0 -0
- /clarifai_utils/{data_upload/datasets → datasets/upload/examples/image_classification/food-101}/__init__.py +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/food-101/images/beignets/1420783.jpg +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/food-101/images/beignets/3287885.jpg +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/food-101/images/beignets/3617075.jpg +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/food-101/images/beignets/38052.jpg +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/food-101/images/beignets/39147.jpg +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/food-101/images/hamburger/139558.jpg +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/food-101/images/hamburger/1636096.jpg +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/food-101/images/hamburger/2480925.jpg +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/food-101/images/hamburger/3385808.jpg +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/food-101/images/hamburger/3647386.jpg +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/food-101/images/prime_rib/1826869.jpg +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/food-101/images/prime_rib/2243245.jpg +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/food-101/images/prime_rib/259212.jpg +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/food-101/images/prime_rib/2842688.jpg +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/food-101/images/prime_rib/3035414.jpg +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/food-101/images/ramen/1545393.jpg +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/food-101/images/ramen/2427642.jpg +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/food-101/images/ramen/3520891.jpg +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/food-101/images/ramen/377566.jpg +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/image_classification/food-101/images/ramen/503504.jpg +0 -0
- /clarifai_utils/{data_upload/datasets/zoo → datasets/upload/examples/text_classification}/__init__.py +0 -0
- /clarifai_utils/{data_upload/examples/image_classification → datasets/upload/examples/text_classification/imdb_dataset}/__init__.py +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/text_classification/imdb_dataset/test.csv +0 -0
- /clarifai_utils/{data_upload → datasets/upload}/examples/text_classification/imdb_dataset/train.csv +0 -0
- /clarifai_utils/{data_upload/examples/image_classification/cifar10 → datasets/upload/loaders}/__init__.py +0 -0
- /clarifai_utils/{data_upload/examples/image_classification/food-101 → utils}/__init__.py +0 -0
|
@@ -0,0 +1,730 @@
|
|
|
1
|
+
import csv
|
|
2
|
+
import os
|
|
3
|
+
import time
|
|
4
|
+
import uuid
|
|
5
|
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
|
6
|
+
from multiprocessing import cpu_count
|
|
7
|
+
from typing import List, Union
|
|
8
|
+
|
|
9
|
+
from clarifai_grpc.grpc.api import resources_pb2, service_pb2 # noqa: F401
|
|
10
|
+
from clarifai_grpc.grpc.api.resources_pb2 import Annotation, Audio, Image, Input, Text, Video
|
|
11
|
+
from clarifai_grpc.grpc.api.status import status_code_pb2, status_pb2
|
|
12
|
+
from google.protobuf.json_format import MessageToDict
|
|
13
|
+
from google.protobuf.struct_pb2 import Struct
|
|
14
|
+
from tqdm import tqdm
|
|
15
|
+
|
|
16
|
+
from clarifai.client.base import BaseClient
|
|
17
|
+
from clarifai.client.lister import Lister
|
|
18
|
+
from clarifai.errors import UserError
|
|
19
|
+
from clarifai.utils.logging import get_logger
|
|
20
|
+
from clarifai.utils.misc import BackoffIterator, Chunker
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class Inputs(Lister, BaseClient):
|
|
24
|
+
"""Inputs is a class that provides access to Clarifai API endpoints related to Input information."""
|
|
25
|
+
|
|
26
|
+
def __init__(self, user_id: str = "", app_id: str = "", logger_level: str = "INFO", **kwargs):
    """Set up an Inputs client scoped to a single user/app pair.

    Args:
        user_id (str): User ID used for authentication.
        app_id (str): ID of the app whose inputs this client manages.
        logger_level (str): Logging level name for this client's logger.
        **kwargs: Extra fields forwarded verbatim into the backing
            ``resources_pb2.Input`` proto.
    """
    self.user_id = user_id
    self.app_id = app_id
    # Keep a plain-dict copy of the extra args, then materialize them as a proto.
    self.kwargs = dict(kwargs)
    self.input_info = resources_pb2.Input(**self.kwargs)
    self.logger = get_logger(logger_level=logger_level, name=__name__)
    # Initialize both bases explicitly (cooperative super() is not used here).
    BaseClient.__init__(self, user_id=self.user_id, app_id=self.app_id)
    Lister.__init__(self)
|
|
41
|
+
|
|
42
|
+
def _get_proto(self,
               input_id: str,
               dataset_id: Union[str, None],
               imagepb: Image = None,
               video_pb: Video = None,
               audio_pb: Audio = None,
               text_pb: Text = None,
               geo_info: List = None,
               labels: List = None,
               metadata: Struct = None) -> Input:
    """Create an input proto from the given media protos and annotations.

    Args:
        input_id (str): The input ID for the input to create.
        dataset_id (str): The dataset ID for the dataset to add the input to.
        imagepb (Image): The image proto to be used for the input.
        video_pb (Video): The video proto to be used for the input.
        audio_pb (Audio): The audio proto to be used for the input.
        text_pb (Text): The text proto to be used for the input.
        geo_info (list): A list of longitude and latitude for the geo point.
        labels (list): A list of labels for the input.
        metadata (Struct): A Struct of metadata for the input.

    Returns:
        Input: An Input object for the specified input ID.
    """
    assert geo_info is None or isinstance(
        geo_info, list), "geo_info must be a list of longitude and latitude"
    assert labels is None or isinstance(labels, list), "labels must be a list of strings"
    assert metadata is None or isinstance(metadata, Struct), "metadata must be a Struct"

    geo_pb = resources_pb2.Geo(geo_point=resources_pb2.GeoPoint(
        longitude=geo_info[0], latitude=geo_info[1])) if geo_info else None
    # Concept IDs are derived from the label text with spaces removed,
    # e.g. "hot dog" -> "id-hotdog"; every concept gets full confidence.
    concepts = [
        resources_pb2.Concept(
            id=f"id-{''.join(_label.split(' '))}", name=_label, value=1.)
        for _label in labels
    ] if labels else None

    # Build the Data payload once instead of duplicating it in each return
    # branch (the original repeated the whole construction for the
    # dataset / no-dataset cases); only dataset_ids differs between them.
    data_pb = resources_pb2.Data(
        image=imagepb,
        video=video_pb,
        audio=audio_pb,
        text=text_pb,
        geo=geo_pb,
        concepts=concepts,
        metadata=metadata)
    if dataset_id:
        return resources_pb2.Input(id=input_id, dataset_ids=[dataset_id], data=data_pb)
    return resources_pb2.Input(id=input_id, data=data_pb)
|
|
101
|
+
|
|
102
|
+
def get_input_from_url(self,
                       input_id: str,
                       image_url: str = None,
                       video_url: str = None,
                       audio_url: str = None,
                       text_url: str = None,
                       dataset_id: str = None,
                       **kwargs) -> Input:
  """Build an input proto whose media is referenced by URL.

  Args:
      input_id (str): The input ID for the input to create.
      image_url (str): The url for the image.
      video_url (str): The url for the video.
      audio_url (str): The url for the audio.
      text_url (str): The url for the text.
      dataset_id (str): The dataset ID for the dataset to add the input to.
      **kwargs: forwarded to ``_get_proto`` (e.g. geo_info, labels, metadata).

  Returns:
      Input: An Input object for the specified input ID.

  Raises:
      ValueError: if no URL at all is supplied.

  Example:
      >>> from clarifai.client.input import Input
      >>> input_obj = Input()
      >>> input_proto = input_obj.get_input_from_url(input_id = 'demo', image_url='https://samples.clarifai.com/metro-north.jpg')
  """
  provided = (image_url, video_url, audio_url, text_url)
  if all(not url for url in provided):
    raise ValueError(
        "At least one of image_url, video_url, audio_url, text_url must be provided.")
  # Wrap each supplied URL in its matching media proto; absent media stays None.
  image_proto = resources_pb2.Image(url=image_url) if image_url else None
  video_proto = resources_pb2.Video(url=video_url) if video_url else None
  audio_proto = resources_pb2.Audio(url=audio_url) if audio_url else None
  text_proto = resources_pb2.Text(url=text_url) if text_url else None
  return self._get_proto(
      input_id=input_id,
      dataset_id=dataset_id,
      imagepb=image_proto,
      video_pb=video_proto,
      audio_pb=audio_proto,
      text_pb=text_proto,
      **kwargs)
|
|
143
|
+
|
|
144
|
+
def get_input_from_file(self,
                        input_id: str,
                        image_file: str = None,
                        video_file: str = None,
                        audio_file: str = None,
                        dataset_id: str = None,
                        **kwargs) -> Input:
  """Create input proto from local files.

  Args:
      input_id (str): The input ID for the input to create.
      image_file (str): path to the image file.
      video_file (str): path to the video file.
      audio_file (str): path to the audio file.
      dataset_id (str): The dataset ID for the dataset to add the input to.
      **kwargs: forwarded to ``_get_proto`` (e.g. geo_info, labels, metadata).

  Returns:
      Input: An Input object for the specified input ID.

  Raises:
      ValueError: if no file path at all is supplied.

  Example:
      >>> from clarifai.client.input import Input
      >>> input_obj = Input()
      >>> input_proto = input_obj.get_input_from_file(input_id = 'demo', video_file='file_path')
  """
  if not any((image_file, video_file, audio_file)):
    raise ValueError("At least one of image_file, video_file, audio_file, must be provided.")

  def _read_bytes(path: str) -> bytes:
    # Context manager guarantees the handle is closed; the previous
    # open(...).read() pattern leaked file handles.
    with open(path, 'rb') as f:
      return f.read()

  image_pb = resources_pb2.Image(base64=_read_bytes(image_file)) if image_file else None
  video_pb = resources_pb2.Video(base64=_read_bytes(video_file)) if video_file else None
  audio_pb = resources_pb2.Audio(base64=_read_bytes(audio_file)) if audio_file else None
  return self._get_proto(
      input_id=input_id,
      dataset_id=dataset_id,
      imagepb=image_pb,
      video_pb=video_pb,
      audio_pb=audio_pb,
      **kwargs)
|
|
180
|
+
|
|
181
|
+
def get_input_from_bytes(self,
                         input_id: str,
                         image_bytes: bytes = None,
                         video_bytes: bytes = None,
                         audio_bytes: bytes = None,
                         dataset_id: str = None,
                         **kwargs) -> Input:
  """Build an input proto from in-memory media bytes.

  Args:
      input_id (str): The input ID for the input to create.
      image_bytes (bytes): raw bytes of the image.
      video_bytes (bytes): raw bytes of the video.
      audio_bytes (bytes): raw bytes of the audio.
      dataset_id (str): The dataset ID for the dataset to add the input to.
      **kwargs: forwarded to ``_get_proto`` (e.g. geo_info, labels, metadata).

  Returns:
      Input: An Input object for the specified input ID.

  Raises:
      ValueError: if no media bytes at all are supplied.

  Example:
      >>> from clarifai.client.input import Input
      >>> input_obj = Input()
      >>> image = open('demo.jpg', 'rb').read()
      >>> video = open('demo.mp4', 'rb').read()
      >>> input_proto = input_obj.get_input_from_bytes(input_id = 'demo',image_bytes =image, video_bytes=video)
  """
  if not (image_bytes or video_bytes or audio_bytes):
    raise ValueError("At least one of image_bytes, video_bytes, audio_bytes, must be provided.")
  # Each media kind maps straight onto a base64 proto field; unset kinds stay None.
  image_proto = None if not image_bytes else resources_pb2.Image(base64=image_bytes)
  video_proto = None if not video_bytes else resources_pb2.Video(base64=video_bytes)
  audio_proto = None if not audio_bytes else resources_pb2.Audio(base64=audio_bytes)
  return self._get_proto(
      input_id=input_id,
      dataset_id=dataset_id,
      imagepb=image_proto,
      video_pb=video_proto,
      audio_pb=audio_proto,
      **kwargs)
|
|
219
|
+
|
|
220
|
+
def get_image_inputs_from_folder(self,
                                 folder_path: str,
                                 dataset_id: str = None,
                                 labels: bool = False) -> List[Input]:  #image specific
  """Create input protos for image data type from folder.

  Args:
      folder_path (str): Path to the folder containing images.
      dataset_id (str): The dataset ID for the dataset to add the inputs to.
      labels (bool): if True, every image is labeled with the folder's name.

  Returns:
      list of Input: A list of Input objects for the specified folder.

  Example:
      >>> from clarifai.client.input import Input
      >>> input_obj = Input()
      >>> input_protos = input_obj.get_image_inputs_from_folder(folder_path='demo_folder')
  """
  input_protos = []
  # The folder's final path component doubles as the label for every image.
  # NOTE(review): assumes '/'-separated paths with no trailing slash — confirm on Windows.
  labels = [folder_path.split('/')[-1]] if labels else None
  for filename in os.listdir(folder_path):
    # Only common image extensions are uploaded; everything else is skipped.
    if filename.split('.')[-1] not in ['jpg', 'jpeg', 'png', 'tiff', 'webp']:
      continue
    input_id = filename.split('.')[0]
    # 'with' closes the handle deterministically (was open(...).read(), which leaked it).
    with open(os.path.join(folder_path, filename), 'rb') as f:
      image_pb = resources_pb2.Image(base64=f.read())
    input_protos.append(
        self._get_proto(input_id=input_id, dataset_id=dataset_id, imagepb=image_pb, labels=labels))
  return input_protos
|
|
248
|
+
|
|
249
|
+
def get_text_input(self, input_id: str, raw_text: str, dataset_id: str = None,
                   **kwargs) -> Text:  #text specific
  """Wrap a raw text string in an input proto.

  Args:
      input_id (str): The input ID for the input to create.
      raw_text (str): The raw text input.
      dataset_id (str): The dataset ID for the dataset to add the input to.
      **kwargs: Additional keyword arguments forwarded to ``_get_proto``.

  Returns:
      Text: An Input object for the specified input ID.

  Example:
      >>> from clarifai.client.input import Input
      >>> input_obj = Input()
      >>> input_protos = input_obj.get_text_input(input_id = 'demo', raw_text = 'This is a test')
  """
  return self._get_proto(
      input_id=input_id,
      dataset_id=dataset_id,
      text_pb=resources_pb2.Text(raw=raw_text),
      **kwargs)
|
|
269
|
+
|
|
270
|
+
def get_text_input_from_csv(self, csv_path: str, dataset_id: str = None,
                            labels: str = True) -> List[Text]:  #text specific
  """Create input protos for text data type from a csv file.

  Args:
      csv_path (str): Path to the csv file. Column 0 holds the text,
          column 1 (when ``labels`` is True) holds the label.
      dataset_id (str): The dataset ID for the dataset to add the inputs to.
      labels (str): True if csv file has a labels column.

  Returns:
      inputs: List of inputs

  Example:
      >>> from clarifai.client.input import Input
      >>> input_obj = Input()
      >>> input_protos = input_obj.get_text_input_from_csv(csv_path = 'filepath')
  """
  input_protos = []
  with open(csv_path) as _file:
    reader = csv.reader(_file)
    next(reader, None)  # skip header
    # Use distinct loop names: the original shadowed the builtins `id`/`input`
    # and clobbered the `labels` parameter, so a falsy label in one row
    # silently disabled labeling for every following row.
    for row_index, row in enumerate(reader):
      text = row[0]
      if labels:
        assert len(row) == 2, "csv file should have two columns(input, labels)"
        row_labels = row[1] if isinstance(row[1], list) else [row[1]]
      else:
        row_labels = None
      input_id = f"{dataset_id}-{row_index}"
      input_protos.append(
          self.get_text_input(
              input_id=input_id, raw_text=text, dataset_id=dataset_id, labels=row_labels))

  return input_protos
|
|
304
|
+
|
|
305
|
+
def get_text_inputs_from_folder(self,
                                folder_path: str,
                                dataset_id: str = None,
                                labels: bool = False) -> List[Text]:  #text specific
  """Create input protos for text data type from folder.

  Args:
      folder_path (str): Path to the folder containing text files.
      dataset_id (str): The dataset ID for the dataset to add the inputs to.
      labels (bool): if True, every text file is labeled with the folder's name.

  Returns:
      list of Input: A list of Input objects for the specified folder.

  Example:
      >>> from clarifai.client.input import Input
      >>> input_obj = Input()
      >>> input_protos = input_obj.get_text_inputs_from_folder(folder_path='demo_folder')
  """
  input_protos = []
  # The folder's final path component doubles as the label for every file.
  labels = [folder_path.split('/')[-1]] if labels else None
  for filename in os.listdir(folder_path):
    if filename.split('.')[-1] != 'txt':
      continue
    input_id = filename.split('.')[0]
    # 'with' closes the handle deterministically (was open(...).read(), which leaked it).
    # NOTE(review): the file is read as bytes and assigned to Text.raw (a string
    # field) — this relies on the content being valid UTF-8; confirm intended.
    with open(os.path.join(folder_path, filename), 'rb') as f:
      text_pb = resources_pb2.Text(raw=f.read())
    input_protos.append(
        self._get_proto(input_id=input_id, dataset_id=dataset_id, text_pb=text_pb, labels=labels))
  return input_protos
|
|
333
|
+
|
|
334
|
+
def get_annotation_proto(self, input_id: str, label: str, annotations: List) -> Annotation:
  """Create an annotation proto for each bounding box, label input pair.

  Args:
      input_id (str): The input ID for the annotation to create.
      label (str): annotation label
      annotations (List): a single bbox's coordinates, ordered
          [xmin, ymin, xmax, ymax] (top_row < bottom_row, left_col < right_col).

  Returns:
      An annotation object for the specified input ID.

  Raises:
      UserError: if ``annotations`` is not a list.

  Example:
      >>> from clarifai.client.input import Input
      >>> input_obj = Input()
      >>> input_obj.get_annotation_proto(input_id='demo', label='demo', annotations=[x_min, y_min, x_max, y_max])
  """
  if not isinstance(annotations, list):
    # Message typo fixed ("cooridnates" -> "coordinates").
    raise UserError("annotations must be a list of bbox coordinates")
  input_annot_proto = resources_pb2.Annotation(
      input_id=input_id,
      data=resources_pb2.Data(regions=[
          resources_pb2.Region(
              region_info=resources_pb2.RegionInfo(bounding_box=resources_pb2.BoundingBox(
                  # Annotations ordering: [xmin, ymin, xmax, ymax]
                  # top_row must be less than bottom row
                  # left_col must be less than right col
                  top_row=annotations[1],  #y_min
                  left_col=annotations[0],  #x_min
                  bottom_row=annotations[3],  #y_max
                  right_col=annotations[2]  #x_max
              )),
              # Concept id is the label with spaces stripped, mirroring _get_proto.
              data=resources_pb2.Data(concepts=[
                  resources_pb2.Concept(
                      id=f"id-{''.join(label.split(' '))}", name=label, value=1.)
              ]))
      ]))

  return input_annot_proto
|
|
372
|
+
|
|
373
|
+
def get_mask_proto(self, input_id: str, label: str, polygons: List[List[float]]) -> Annotation:
  """Create an annotation proto for a polygon region, label input pair.

  Args:
      input_id (str): The input ID for the annotation to create.
      label (str): annotation label
      polygons (List[List[float]]): iterable of [x, y] points describing one
          polygon, e.g. [[x, y], ..., [x, y]] (x maps to column, y to row).

  Returns:
      An annotation object for the specified input ID.

  Raises:
      UserError: if ``polygons`` is not a list.

  Example:
      >>> from clarifai.client.input import Input
      >>> input_obj = Input()
      >>> input_obj.get_mask_proto(input_id='demo', label='demo', polygons=[[x, y], ..., [x, y]])
  """
  if not isinstance(polygons, list):
    raise UserError("polygons must be a list of points")
  input_mask_proto = resources_pb2.Annotation(
      input_id=input_id,
      data=resources_pb2.Data(regions=[
          resources_pb2.Region(
              region_info=resources_pb2.RegionInfo(polygon=resources_pb2.Polygon(
                  points=[
                      resources_pb2.Point(
                          row=_point[1],  # row is y point
                          col=_point[0],  # col is x point
                          visibility="VISIBLE") for _point in polygons
                  ])),
              # Concept id is the label with spaces stripped, mirroring _get_proto.
              data=resources_pb2.Data(concepts=[
                  resources_pb2.Concept(
                      id=f"id-{''.join(label.split(' '))}", name=label, value=1.)
              ]))
      ]))

  return input_mask_proto
|
|
409
|
+
|
|
410
|
+
def upload_from_url(self,
                    input_id: str,
                    image_url: str = None,
                    video_url: str = None,
                    audio_url: str = None,
                    text_url: str = None,
                    dataset_id: str = None,
                    **kwargs) -> str:
  """Build an input proto from a URL and upload it to the app.

  Args:
      input_id (str): The input ID for the input to create.
      image_url (str): The url for the image.
      video_url (str): The url for the video.
      audio_url (str): The url for the audio.
      text_url (str): The url for the text.
      dataset_id (str): The dataset ID for the dataset to add the input to.
      **kwargs: forwarded to ``get_input_from_url``.

  Returns:
      input_job_id: job id for the upload request.

  Example:
      >>> from clarifai.client.input import Input
      >>> input_obj = Input(user_id = 'user_id', app_id = 'demo_app')
      >>> input_obj.upload_from_url(input_id='demo', image_url='https://samples.clarifai.com/metro-north.jpg')
  """
  proto = self.get_input_from_url(
      input_id=input_id,
      image_url=image_url,
      video_url=video_url,
      audio_url=audio_url,
      text_url=text_url,
      dataset_id=dataset_id,
      **kwargs)
  return self.upload_inputs([proto])
|
|
439
|
+
|
|
440
|
+
def upload_from_file(self,
                     input_id: str,
                     image_file: str = None,
                     video_file: str = None,
                     audio_file: str = None,
                     dataset_id: str = None,
                     **kwargs) -> str:
  """Build an input proto from a local file and upload it to the app.

  Args:
      input_id (str): The input ID for the input to create.
      image_file (str): The file for the image.
      video_file (str): The file for the video.
      audio_file (str): The file for the audio.
      dataset_id (str): The dataset ID for the dataset to add the input to.
      **kwargs: forwarded to ``get_input_from_file``.

  Returns:
      input_job_id: job id for the upload request.

  Example:
      >>> from clarifai.client.input import Input
      >>> input_obj = Input(user_id = 'user_id', app_id = 'demo_app')
      >>> input_obj.upload_from_file(input_id='demo', audio_file='demo.mp3')
  """
  proto = self.get_input_from_file(
      input_id=input_id,
      image_file=image_file,
      video_file=video_file,
      audio_file=audio_file,
      dataset_id=dataset_id,
      **kwargs)
  return self.upload_inputs([proto])
|
|
467
|
+
|
|
468
|
+
def upload_from_bytes(self,
                      input_id: str,
                      image_bytes: bytes = None,
                      video_bytes: bytes = None,
                      audio_bytes: bytes = None,
                      dataset_id: str = None,
                      **kwargs) -> str:
  """Build an input proto from raw bytes and upload it to the app.

  Args:
      input_id (str): The input ID for the input to create.
      image_bytes (bytes): The bytes for the image.
      video_bytes (bytes): The bytes for the video.
      audio_bytes (bytes): The bytes for the audio.
      dataset_id (str): The dataset ID for the dataset to add the input to.
      **kwargs: forwarded to ``get_input_from_bytes``.

  Returns:
      input_job_id: job id for the upload request.

  Example:
      >>> from clarifai.client.input import Input
      >>> input_obj = Input(user_id = 'user_id', app_id = 'demo_app')
      >>> image = open('demo.jpg', 'rb').read()
      >>> input_obj.upload_from_bytes(input_id='demo', image_bytes=image)
  """
  proto = self.get_input_from_bytes(
      input_id=input_id,
      image_bytes=image_bytes,
      video_bytes=video_bytes,
      audio_bytes=audio_bytes,
      dataset_id=dataset_id,
      **kwargs)
  return self.upload_inputs([proto])
|
|
496
|
+
|
|
497
|
+
def upload_text(self, input_id: str, raw_text: str, dataset_id: str = None,
                **kwargs) -> str:  #text specific
  """Upload a single raw-text input to the app.

  Args:
      input_id (str): The input ID for the input to create.
      raw_text (str): The raw text.
      dataset_id (str): The dataset ID for the dataset to add the input to.
      **kwargs: forwarded to ``_get_proto``.

  Returns:
      input_job_id (str): job id for the upload request.

  Example:
      >>> from clarifai.client.input import Input
      >>> input_obj = Input(user_id = 'user_id', app_id = 'demo_app')
      >>> input_obj.upload_text(input_id = 'demo', raw_text = 'This is a test')
  """
  text_proto = resources_pb2.Text(raw=raw_text)
  proto = self._get_proto(input_id=input_id, dataset_id=dataset_id, text_pb=text_proto, **kwargs)
  return self.upload_inputs([proto])
|
|
520
|
+
|
|
521
|
+
def upload_inputs(self, inputs: List[Input], show_log: bool = True) -> str:
  """Upload list of input objects to the app.

  Args:
      inputs (list): List of input objects to upload.
      show_log (bool): Show upload status log.

  Returns:
      input_job_id: job id for the upload request.

  Raises:
      UserError: if ``inputs`` is not a list.
  """
  if not isinstance(inputs, list):
    raise UserError("inputs must be a list of Input objects")
  input_job_id = uuid.uuid4().hex  # generate a unique id for this job
  request = service_pb2.PostInputsRequest(
      user_app_id=self.user_app_id, inputs=inputs, inputs_add_job_id=input_job_id)
  response = self._grpc_request(self.STUB.PostInputs, request)
  if response.status.code != status_code_pb2.SUCCESS:
    # Prefer the per-input status (more specific); fall back to the overall
    # response status when no inputs were echoed back in the response.
    try:
      self.logger.warning(response.inputs[0].status)
    except IndexError:
      self.logger.warning(response.status)
  else:
    if show_log:
      self.logger.info("\nInputs Uploaded\n%s", response.status)

  # The job id is returned even on failure so callers can still poll the job.
  return input_job_id
|
|
547
|
+
|
|
548
|
+
def upload_annotations(self, batch_annot: List[resources_pb2.Annotation], show_log: bool = True
                      ) -> Union[List[resources_pb2.Annotation], List[None]]:
  """Upload image annotations to app.

  Args:
      batch_annot: annot batch protos
      show_log (bool): Show annotation upload status log.

  Returns:
      retry_upload: annotations that failed to upload (empty list on success).
  """
  retry_upload = []  # those that fail to upload are stored for retries
  request = service_pb2.PostAnnotationsRequest(
      user_app_id=self.user_app_id, annotations=batch_annot)
  response = self._grpc_request(self.STUB.PostAnnotations, request)
  if response.status.code != status_code_pb2.SUCCESS:
    # Prefer the per-annotation status; narrowed the original bare `except:`
    # to IndexError (consistent with upload_inputs) so real bugs still surface.
    try:
      self.logger.warning(
          f"Post annotations failed, status: {response.annotations[0].status.details}")
    except IndexError:
      self.logger.warning(f"Post annotations failed, status: {response.status.details}")
    finally:
      # The whole batch is queued for retry regardless of which log path ran.
      retry_upload.extend(batch_annot)
  else:
    if show_log:
      self.logger.info("\nAnnotations Uploaded\n%s", response.status)
  return retry_upload
|
|
574
|
+
|
|
575
|
+
def _upload_batch(self, inputs: List[Input]) -> List[Input]:
  """Upload a batch of input objects, wait for processing, and prune failures.

  Args:
      inputs (List[Input]): List of input objects to upload.

  Returns:
      failed_inputs (List[Input]): inputs that did not reach download-success
      status; they are deleted from the platform and returned for retry.
  """
  # show_log=False: batch progress is reported by the caller's progress bar.
  input_job_id = self.upload_inputs(inputs, False)
  # Block until the server finishes (or the job times out and is cancelled).
  self._wait_for_inputs(input_job_id)
  failed_inputs = self._delete_failed_inputs(inputs)

  return failed_inputs
|
|
589
|
+
|
|
590
|
+
def delete_inputs(self, inputs: List[Input]) -> None:
  """Delete list of input objects from the app.

  Args:
      inputs (List[Input]): List of input proto objects to delete.

  Raises:
      UserError: if ``inputs`` is not a list.
      Exception: if the delete request does not succeed.

  Example:
      >>> from clarifai.client.user import User
      >>> input_obj = User(user_id="user_id").app(app_id="app_id").inputs()
      >>> input_obj.delete_inputs(input_obj.list_inputs())
  """
  if not isinstance(inputs, list):
    raise UserError("input_ids must be a list of input ids")
  # The API deletes by id, so extract ids from the given protos.
  inputs_ids = [input.id for input in inputs]
  request = service_pb2.DeleteInputsRequest(user_app_id=self.user_app_id, ids=inputs_ids)
  response = self._grpc_request(self.STUB.DeleteInputs, request)
  if response.status.code != status_code_pb2.SUCCESS:
    raise Exception(response.status)
  self.logger.info("\nInputs Deleted\n%s", response.status)
|
|
609
|
+
|
|
610
|
+
def list_inputs(self) -> List[Input]:  # TODO: update lister
  """Lists all the inputs for the app.

  Returns:
      list of Input: A list of Input proto objects for the app.

  Example:
      >>> from clarifai.client.user import User
      >>> input_obj = User(user_id="user_id").app(app_id="app_id").inputs()
      >>> input_obj.list_inputs()
  """
  request_data = dict(user_app_id=self.user_app_id, per_page=self.default_page_size)
  # Drain every page from the paging generator into memory.
  all_inputs_info = list(
      self.list_all_pages_generator(self.STUB.ListInputs, service_pb2.ListInputsRequest,
                                    request_data))
  # The lister yields dicts keyed 'input_id'; the Input proto field is 'id'.
  for input_info in all_inputs_info:
    input_info['id'] = input_info.pop('input_id')
  return [resources_pb2.Input(**input_info) for input_info in all_inputs_info]
|
|
628
|
+
|
|
629
|
+
def _bulk_upload(self, inputs: List[Input], chunk_size: int = 128) -> None:
  """Upload a large number of inputs concurrently in fixed-size chunks.

  Args:
      inputs (List[Input]): input protos
      chunk_size (int): chunk size for each request (capped at 128)
  """
  num_workers: int = min(10, cpu_count())  # limit max workers to 10
  chunk_size = min(128, chunk_size)  # limit max protos in a req
  chunked_inputs = Chunker(inputs, chunk_size).chunk()
  with ThreadPoolExecutor(max_workers=num_workers) as executor:
    with tqdm(total=len(chunked_inputs), desc='Uploading inputs') as progress:
      # Submit all jobs to the executor and store the returned futures
      futures = [
          executor.submit(self._upload_batch, batch_input_ids)
          for batch_input_ids in chunked_inputs
      ]

      # Each finished batch returns its failed inputs; retry them before
      # advancing the progress bar.
      for job in as_completed(futures):
        retry_input_proto = job.result()
        self._retry_uploads(retry_input_proto)
        progress.update()
|
|
651
|
+
|
|
652
|
+
def _wait_for_inputs(self, input_job_id: str) -> bool:
  """Poll an inputs-add job until it settles. Cancel the job after a
  30-minute timeout or 10 consecutive failed status checks.

  Args:
      input_job_id (str): Upload Input Job ID

  Returns:
      True if inputs are processed, False if the job was cancelled.
  """
  backoff_iterator = BackoffIterator()
  max_retries = 10
  start_time = time.time()
  while True:
    request = service_pb2.GetInputsAddJobRequest(user_app_id=self.user_app_id, id=input_job_id)
    response = self._grpc_request(self.STUB.GetInputsAddJob, request)

    if time.time() - start_time > 60 * 30 or max_retries == 0:  # 30 minutes timeout
      self._grpc_request(self.STUB.CancelInputsAddJob,
                         service_pb2.CancelInputsAddJobRequest(
                             user_app_id=self.user_app_id, id=input_job_id))  #Cancel Job
      return False
    if response.status.code != status_code_pb2.SUCCESS:
      max_retries -= 1
      self.logger.warning(f"Get input job failed, status: {response.status.details}\n")
      # NOTE(review): this retries immediately without sleeping; only the
      # success path backs off — confirm the tight retry loop is intended.
      continue
    # Done once nothing is still in flight or queued on the server.
    if response.inputs_add_job.progress.in_progress_count == 0 and response.inputs_add_job.progress.pending_count == 0:
      return True
    else:
      time.sleep(next(backoff_iterator))
|
|
681
|
+
|
|
682
|
+
def _retry_uploads(self, failed_inputs: List[Input]) -> None:
  """Re-upload inputs that failed in a previous batch; no-op on an empty list.

  Args:
      failed_inputs (List[Input]): failed input protos
  """
  if not failed_inputs:
    return
  self._upload_batch(failed_inputs)
|
|
690
|
+
|
|
691
|
+
def _delete_failed_inputs(self, inputs: List[Input]) -> List[Input]:
  """Delete failed input ids from clarifai platform dataset.

  Args:
      inputs (List[Input]): batch input protos

  Returns:
      failed_inputs (List[Input]): inputs from the batch that did not reach
      download-success status (they are deleted server-side before returning).
  """
  input_ids = [input.id for input in inputs]
  # List only the inputs from this batch that downloaded successfully...
  success_status = status_pb2.Status(code=status_code_pb2.INPUT_DOWNLOAD_SUCCESS)
  request = service_pb2.ListInputsRequest(
      ids=input_ids,
      per_page=len(input_ids),
      user_app_id=self.user_app_id,
      status=success_status)
  response = self._grpc_request(self.STUB.ListInputs, request)
  response_dict = MessageToDict(response)
  success_inputs = response_dict.get('inputs', [])

  success_input_ids = [input.get('id') for input in success_inputs]
  # ...everything else in the batch is considered failed.
  failed_inputs = [input for input in inputs if input.id not in success_input_ids]
  #delete failed inputs
  self._grpc_request(self.STUB.DeleteInputs,
                     service_pb2.DeleteInputsRequest(
                         user_app_id=self.user_app_id, ids=[input.id
                                                           for input in failed_inputs]))

  return failed_inputs
|
|
720
|
+
|
|
721
|
+
def __getattr__(self, name):
  """Delegate unknown attribute lookups to the wrapped ``input_info`` proto."""
  return getattr(self.input_info, name)
|
|
723
|
+
|
|
724
|
+
def __str__(self):
  """Summarize the constructor kwargs that exist as fields on ``input_info``."""
  details = [
      f"{key}={getattr(self.input_info, key)}"
      for key in self.kwargs
      if hasattr(self.input_info, key)
  ]
  return f"Input Details: \n{', '.join(details)}\n"
|