clarifai 11.3.0rc2__py3-none-any.whl → 11.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clarifai/__init__.py +1 -1
- clarifai/cli/__main__.py +1 -1
- clarifai/cli/base.py +144 -136
- clarifai/cli/compute_cluster.py +45 -31
- clarifai/cli/deployment.py +93 -76
- clarifai/cli/model.py +578 -180
- clarifai/cli/nodepool.py +100 -82
- clarifai/client/__init__.py +12 -2
- clarifai/client/app.py +973 -911
- clarifai/client/auth/helper.py +345 -342
- clarifai/client/auth/register.py +7 -7
- clarifai/client/auth/stub.py +107 -106
- clarifai/client/base.py +185 -178
- clarifai/client/compute_cluster.py +214 -180
- clarifai/client/dataset.py +793 -698
- clarifai/client/deployment.py +55 -50
- clarifai/client/input.py +1223 -1088
- clarifai/client/lister.py +47 -45
- clarifai/client/model.py +1939 -1717
- clarifai/client/model_client.py +525 -502
- clarifai/client/module.py +82 -73
- clarifai/client/nodepool.py +358 -213
- clarifai/client/runner.py +58 -0
- clarifai/client/search.py +342 -309
- clarifai/client/user.py +419 -414
- clarifai/client/workflow.py +294 -274
- clarifai/constants/dataset.py +11 -17
- clarifai/constants/model.py +8 -2
- clarifai/datasets/export/inputs_annotations.py +233 -217
- clarifai/datasets/upload/base.py +63 -51
- clarifai/datasets/upload/features.py +43 -38
- clarifai/datasets/upload/image.py +237 -207
- clarifai/datasets/upload/loaders/coco_captions.py +34 -32
- clarifai/datasets/upload/loaders/coco_detection.py +72 -65
- clarifai/datasets/upload/loaders/imagenet_classification.py +57 -53
- clarifai/datasets/upload/loaders/xview_detection.py +274 -132
- clarifai/datasets/upload/multimodal.py +55 -46
- clarifai/datasets/upload/text.py +55 -47
- clarifai/datasets/upload/utils.py +250 -234
- clarifai/errors.py +51 -50
- clarifai/models/api.py +260 -238
- clarifai/modules/css.py +50 -50
- clarifai/modules/pages.py +33 -33
- clarifai/rag/rag.py +312 -288
- clarifai/rag/utils.py +91 -84
- clarifai/runners/models/model_builder.py +906 -802
- clarifai/runners/models/model_class.py +370 -331
- clarifai/runners/models/model_run_locally.py +459 -419
- clarifai/runners/models/model_runner.py +170 -162
- clarifai/runners/models/model_servicer.py +78 -70
- clarifai/runners/server.py +111 -101
- clarifai/runners/utils/code_script.py +225 -187
- clarifai/runners/utils/const.py +4 -1
- clarifai/runners/utils/data_types/__init__.py +12 -0
- clarifai/runners/utils/data_types/data_types.py +598 -0
- clarifai/runners/utils/data_utils.py +387 -440
- clarifai/runners/utils/loader.py +247 -227
- clarifai/runners/utils/method_signatures.py +411 -386
- clarifai/runners/utils/openai_convertor.py +108 -109
- clarifai/runners/utils/serializers.py +175 -179
- clarifai/runners/utils/url_fetcher.py +35 -35
- clarifai/schema/search.py +56 -63
- clarifai/urls/helper.py +125 -102
- clarifai/utils/cli.py +129 -123
- clarifai/utils/config.py +127 -87
- clarifai/utils/constants.py +49 -0
- clarifai/utils/evaluation/helpers.py +503 -466
- clarifai/utils/evaluation/main.py +431 -393
- clarifai/utils/evaluation/testset_annotation_parser.py +154 -144
- clarifai/utils/logging.py +324 -306
- clarifai/utils/misc.py +60 -56
- clarifai/utils/model_train.py +165 -146
- clarifai/utils/protobuf.py +126 -103
- clarifai/versions.py +3 -1
- clarifai/workflows/export.py +48 -50
- clarifai/workflows/utils.py +39 -36
- clarifai/workflows/validate.py +55 -43
- {clarifai-11.3.0rc2.dist-info → clarifai-11.4.0.dist-info}/METADATA +16 -6
- clarifai-11.4.0.dist-info/RECORD +109 -0
- {clarifai-11.3.0rc2.dist-info → clarifai-11.4.0.dist-info}/WHEEL +1 -1
- clarifai/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/__pycache__/__init__.cpython-311.pyc +0 -0
- clarifai/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/__pycache__/errors.cpython-310.pyc +0 -0
- clarifai/__pycache__/errors.cpython-311.pyc +0 -0
- clarifai/__pycache__/versions.cpython-310.pyc +0 -0
- clarifai/__pycache__/versions.cpython-311.pyc +0 -0
- clarifai/cli/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/cli/__pycache__/__init__.cpython-311.pyc +0 -0
- clarifai/cli/__pycache__/base.cpython-310.pyc +0 -0
- clarifai/cli/__pycache__/base.cpython-311.pyc +0 -0
- clarifai/cli/__pycache__/base_cli.cpython-310.pyc +0 -0
- clarifai/cli/__pycache__/compute_cluster.cpython-310.pyc +0 -0
- clarifai/cli/__pycache__/compute_cluster.cpython-311.pyc +0 -0
- clarifai/cli/__pycache__/deployment.cpython-310.pyc +0 -0
- clarifai/cli/__pycache__/deployment.cpython-311.pyc +0 -0
- clarifai/cli/__pycache__/model.cpython-310.pyc +0 -0
- clarifai/cli/__pycache__/model.cpython-311.pyc +0 -0
- clarifai/cli/__pycache__/model_cli.cpython-310.pyc +0 -0
- clarifai/cli/__pycache__/nodepool.cpython-310.pyc +0 -0
- clarifai/cli/__pycache__/nodepool.cpython-311.pyc +0 -0
- clarifai/client/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/client/__pycache__/__init__.cpython-311.pyc +0 -0
- clarifai/client/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/client/__pycache__/app.cpython-310.pyc +0 -0
- clarifai/client/__pycache__/app.cpython-311.pyc +0 -0
- clarifai/client/__pycache__/app.cpython-39.pyc +0 -0
- clarifai/client/__pycache__/base.cpython-310.pyc +0 -0
- clarifai/client/__pycache__/base.cpython-311.pyc +0 -0
- clarifai/client/__pycache__/compute_cluster.cpython-310.pyc +0 -0
- clarifai/client/__pycache__/compute_cluster.cpython-311.pyc +0 -0
- clarifai/client/__pycache__/dataset.cpython-310.pyc +0 -0
- clarifai/client/__pycache__/dataset.cpython-311.pyc +0 -0
- clarifai/client/__pycache__/deployment.cpython-310.pyc +0 -0
- clarifai/client/__pycache__/deployment.cpython-311.pyc +0 -0
- clarifai/client/__pycache__/input.cpython-310.pyc +0 -0
- clarifai/client/__pycache__/input.cpython-311.pyc +0 -0
- clarifai/client/__pycache__/lister.cpython-310.pyc +0 -0
- clarifai/client/__pycache__/lister.cpython-311.pyc +0 -0
- clarifai/client/__pycache__/model.cpython-310.pyc +0 -0
- clarifai/client/__pycache__/model.cpython-311.pyc +0 -0
- clarifai/client/__pycache__/module.cpython-310.pyc +0 -0
- clarifai/client/__pycache__/module.cpython-311.pyc +0 -0
- clarifai/client/__pycache__/nodepool.cpython-310.pyc +0 -0
- clarifai/client/__pycache__/nodepool.cpython-311.pyc +0 -0
- clarifai/client/__pycache__/search.cpython-310.pyc +0 -0
- clarifai/client/__pycache__/search.cpython-311.pyc +0 -0
- clarifai/client/__pycache__/user.cpython-310.pyc +0 -0
- clarifai/client/__pycache__/user.cpython-311.pyc +0 -0
- clarifai/client/__pycache__/workflow.cpython-310.pyc +0 -0
- clarifai/client/__pycache__/workflow.cpython-311.pyc +0 -0
- clarifai/client/auth/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/client/auth/__pycache__/__init__.cpython-311.pyc +0 -0
- clarifai/client/auth/__pycache__/helper.cpython-310.pyc +0 -0
- clarifai/client/auth/__pycache__/helper.cpython-311.pyc +0 -0
- clarifai/client/auth/__pycache__/register.cpython-310.pyc +0 -0
- clarifai/client/auth/__pycache__/register.cpython-311.pyc +0 -0
- clarifai/client/auth/__pycache__/stub.cpython-310.pyc +0 -0
- clarifai/client/auth/__pycache__/stub.cpython-311.pyc +0 -0
- clarifai/client/cli/__init__.py +0 -0
- clarifai/client/cli/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/client/cli/__pycache__/base_cli.cpython-310.pyc +0 -0
- clarifai/client/cli/__pycache__/model_cli.cpython-310.pyc +0 -0
- clarifai/client/cli/base_cli.py +0 -88
- clarifai/client/cli/model_cli.py +0 -29
- clarifai/constants/__pycache__/base.cpython-310.pyc +0 -0
- clarifai/constants/__pycache__/base.cpython-311.pyc +0 -0
- clarifai/constants/__pycache__/dataset.cpython-310.pyc +0 -0
- clarifai/constants/__pycache__/dataset.cpython-311.pyc +0 -0
- clarifai/constants/__pycache__/input.cpython-310.pyc +0 -0
- clarifai/constants/__pycache__/input.cpython-311.pyc +0 -0
- clarifai/constants/__pycache__/model.cpython-310.pyc +0 -0
- clarifai/constants/__pycache__/model.cpython-311.pyc +0 -0
- clarifai/constants/__pycache__/rag.cpython-310.pyc +0 -0
- clarifai/constants/__pycache__/rag.cpython-311.pyc +0 -0
- clarifai/constants/__pycache__/search.cpython-310.pyc +0 -0
- clarifai/constants/__pycache__/search.cpython-311.pyc +0 -0
- clarifai/constants/__pycache__/workflow.cpython-310.pyc +0 -0
- clarifai/constants/__pycache__/workflow.cpython-311.pyc +0 -0
- clarifai/datasets/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/datasets/__pycache__/__init__.cpython-311.pyc +0 -0
- clarifai/datasets/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/datasets/export/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/datasets/export/__pycache__/__init__.cpython-311.pyc +0 -0
- clarifai/datasets/export/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/datasets/export/__pycache__/inputs_annotations.cpython-310.pyc +0 -0
- clarifai/datasets/export/__pycache__/inputs_annotations.cpython-311.pyc +0 -0
- clarifai/datasets/upload/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/datasets/upload/__pycache__/__init__.cpython-311.pyc +0 -0
- clarifai/datasets/upload/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/datasets/upload/__pycache__/base.cpython-310.pyc +0 -0
- clarifai/datasets/upload/__pycache__/base.cpython-311.pyc +0 -0
- clarifai/datasets/upload/__pycache__/features.cpython-310.pyc +0 -0
- clarifai/datasets/upload/__pycache__/features.cpython-311.pyc +0 -0
- clarifai/datasets/upload/__pycache__/image.cpython-310.pyc +0 -0
- clarifai/datasets/upload/__pycache__/image.cpython-311.pyc +0 -0
- clarifai/datasets/upload/__pycache__/multimodal.cpython-310.pyc +0 -0
- clarifai/datasets/upload/__pycache__/multimodal.cpython-311.pyc +0 -0
- clarifai/datasets/upload/__pycache__/text.cpython-310.pyc +0 -0
- clarifai/datasets/upload/__pycache__/text.cpython-311.pyc +0 -0
- clarifai/datasets/upload/__pycache__/utils.cpython-310.pyc +0 -0
- clarifai/datasets/upload/__pycache__/utils.cpython-311.pyc +0 -0
- clarifai/datasets/upload/loaders/__pycache__/__init__.cpython-311.pyc +0 -0
- clarifai/datasets/upload/loaders/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/datasets/upload/loaders/__pycache__/coco_detection.cpython-311.pyc +0 -0
- clarifai/datasets/upload/loaders/__pycache__/imagenet_classification.cpython-311.pyc +0 -0
- clarifai/models/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/modules/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/rag/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/rag/__pycache__/__init__.cpython-311.pyc +0 -0
- clarifai/rag/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/rag/__pycache__/rag.cpython-310.pyc +0 -0
- clarifai/rag/__pycache__/rag.cpython-311.pyc +0 -0
- clarifai/rag/__pycache__/rag.cpython-39.pyc +0 -0
- clarifai/rag/__pycache__/utils.cpython-310.pyc +0 -0
- clarifai/rag/__pycache__/utils.cpython-311.pyc +0 -0
- clarifai/runners/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/runners/__pycache__/__init__.cpython-311.pyc +0 -0
- clarifai/runners/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/runners/dockerfile_template/Dockerfile.cpu.template +0 -31
- clarifai/runners/dockerfile_template/Dockerfile.cuda.template +0 -42
- clarifai/runners/dockerfile_template/Dockerfile.nim +0 -71
- clarifai/runners/models/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/runners/models/__pycache__/__init__.cpython-311.pyc +0 -0
- clarifai/runners/models/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/runners/models/__pycache__/base_typed_model.cpython-310.pyc +0 -0
- clarifai/runners/models/__pycache__/base_typed_model.cpython-311.pyc +0 -0
- clarifai/runners/models/__pycache__/base_typed_model.cpython-39.pyc +0 -0
- clarifai/runners/models/__pycache__/model_builder.cpython-311.pyc +0 -0
- clarifai/runners/models/__pycache__/model_class.cpython-310.pyc +0 -0
- clarifai/runners/models/__pycache__/model_class.cpython-311.pyc +0 -0
- clarifai/runners/models/__pycache__/model_run_locally.cpython-310-pytest-7.1.2.pyc +0 -0
- clarifai/runners/models/__pycache__/model_run_locally.cpython-310.pyc +0 -0
- clarifai/runners/models/__pycache__/model_run_locally.cpython-311.pyc +0 -0
- clarifai/runners/models/__pycache__/model_runner.cpython-310.pyc +0 -0
- clarifai/runners/models/__pycache__/model_runner.cpython-311.pyc +0 -0
- clarifai/runners/models/__pycache__/model_upload.cpython-310.pyc +0 -0
- clarifai/runners/models/base_typed_model.py +0 -238
- clarifai/runners/models/model_class_refract.py +0 -80
- clarifai/runners/models/model_upload.py +0 -607
- clarifai/runners/models/temp.py +0 -25
- clarifai/runners/utils/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/__init__.cpython-311.pyc +0 -0
- clarifai/runners/utils/__pycache__/__init__.cpython-38.pyc +0 -0
- clarifai/runners/utils/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/runners/utils/__pycache__/buffered_stream.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/buffered_stream.cpython-38.pyc +0 -0
- clarifai/runners/utils/__pycache__/buffered_stream.cpython-39.pyc +0 -0
- clarifai/runners/utils/__pycache__/const.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/const.cpython-311.pyc +0 -0
- clarifai/runners/utils/__pycache__/constants.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/constants.cpython-38.pyc +0 -0
- clarifai/runners/utils/__pycache__/constants.cpython-39.pyc +0 -0
- clarifai/runners/utils/__pycache__/data_handler.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/data_handler.cpython-311.pyc +0 -0
- clarifai/runners/utils/__pycache__/data_handler.cpython-38.pyc +0 -0
- clarifai/runners/utils/__pycache__/data_handler.cpython-39.pyc +0 -0
- clarifai/runners/utils/__pycache__/data_utils.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/data_utils.cpython-311.pyc +0 -0
- clarifai/runners/utils/__pycache__/data_utils.cpython-38.pyc +0 -0
- clarifai/runners/utils/__pycache__/data_utils.cpython-39.pyc +0 -0
- clarifai/runners/utils/__pycache__/grpc_server.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/grpc_server.cpython-38.pyc +0 -0
- clarifai/runners/utils/__pycache__/grpc_server.cpython-39.pyc +0 -0
- clarifai/runners/utils/__pycache__/health.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/health.cpython-38.pyc +0 -0
- clarifai/runners/utils/__pycache__/health.cpython-39.pyc +0 -0
- clarifai/runners/utils/__pycache__/loader.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/loader.cpython-311.pyc +0 -0
- clarifai/runners/utils/__pycache__/logging.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/logging.cpython-38.pyc +0 -0
- clarifai/runners/utils/__pycache__/logging.cpython-39.pyc +0 -0
- clarifai/runners/utils/__pycache__/stream_source.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/stream_source.cpython-39.pyc +0 -0
- clarifai/runners/utils/__pycache__/url_fetcher.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/url_fetcher.cpython-311.pyc +0 -0
- clarifai/runners/utils/__pycache__/url_fetcher.cpython-38.pyc +0 -0
- clarifai/runners/utils/__pycache__/url_fetcher.cpython-39.pyc +0 -0
- clarifai/runners/utils/data_handler.py +0 -231
- clarifai/runners/utils/data_handler_refract.py +0 -213
- clarifai/runners/utils/data_types.py +0 -469
- clarifai/runners/utils/logger.py +0 -0
- clarifai/runners/utils/openai_format.py +0 -87
- clarifai/schema/__pycache__/search.cpython-310.pyc +0 -0
- clarifai/schema/__pycache__/search.cpython-311.pyc +0 -0
- clarifai/urls/__pycache__/helper.cpython-310.pyc +0 -0
- clarifai/urls/__pycache__/helper.cpython-311.pyc +0 -0
- clarifai/utils/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/utils/__pycache__/__init__.cpython-311.pyc +0 -0
- clarifai/utils/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/utils/__pycache__/cli.cpython-310.pyc +0 -0
- clarifai/utils/__pycache__/cli.cpython-311.pyc +0 -0
- clarifai/utils/__pycache__/config.cpython-311.pyc +0 -0
- clarifai/utils/__pycache__/constants.cpython-310.pyc +0 -0
- clarifai/utils/__pycache__/constants.cpython-311.pyc +0 -0
- clarifai/utils/__pycache__/logging.cpython-310.pyc +0 -0
- clarifai/utils/__pycache__/logging.cpython-311.pyc +0 -0
- clarifai/utils/__pycache__/misc.cpython-310.pyc +0 -0
- clarifai/utils/__pycache__/misc.cpython-311.pyc +0 -0
- clarifai/utils/__pycache__/model_train.cpython-310.pyc +0 -0
- clarifai/utils/__pycache__/model_train.cpython-311.pyc +0 -0
- clarifai/utils/__pycache__/protobuf.cpython-311.pyc +0 -0
- clarifai/utils/evaluation/__pycache__/__init__.cpython-311.pyc +0 -0
- clarifai/utils/evaluation/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/utils/evaluation/__pycache__/helpers.cpython-311.pyc +0 -0
- clarifai/utils/evaluation/__pycache__/main.cpython-311.pyc +0 -0
- clarifai/utils/evaluation/__pycache__/main.cpython-39.pyc +0 -0
- clarifai/workflows/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/workflows/__pycache__/__init__.cpython-311.pyc +0 -0
- clarifai/workflows/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/workflows/__pycache__/export.cpython-310.pyc +0 -0
- clarifai/workflows/__pycache__/export.cpython-311.pyc +0 -0
- clarifai/workflows/__pycache__/utils.cpython-310.pyc +0 -0
- clarifai/workflows/__pycache__/utils.cpython-311.pyc +0 -0
- clarifai/workflows/__pycache__/validate.cpython-310.pyc +0 -0
- clarifai/workflows/__pycache__/validate.cpython-311.pyc +0 -0
- clarifai-11.3.0rc2.dist-info/RECORD +0 -322
- {clarifai-11.3.0rc2.dist-info → clarifai-11.4.0.dist-info}/entry_points.txt +0 -0
- {clarifai-11.3.0rc2.dist-info → clarifai-11.4.0.dist-info/licenses}/LICENSE +0 -0
- {clarifai-11.3.0rc2.dist-info → clarifai-11.4.0.dist-info}/top_level.txt +0 -0
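As a quick orientation to the reworked `clarifai.client.input.Inputs` client whose diff follows, here is a minimal usage sketch assembled from the docstring examples visible in that diff. The `user_id`/`app_id` values, input IDs, and the sample image URL are the placeholders used in those docstrings; treat this as an illustrative snippet, not a verified 11.4.0 walkthrough.

>>> from clarifai.client.input import Inputs
>>> input_obj = Inputs(user_id='user_id', app_id='demo_app')
>>> # build an input proto from a hosted image without uploading it
>>> input_proto = Inputs.get_input_from_url(input_id='demo', image_url='https://samples.clarifai.com/metro-north.jpg')
>>> # or upload directly to the app
>>> input_obj.upload_from_url(input_id='demo', image_url='https://samples.clarifai.com/metro-north.jpg')
>>> input_obj.upload_text(input_id='demo', raw_text='This is a test')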
clarifai/client/input.py
CHANGED
@@ -26,55 +26,60 @@ from clarifai.utils.misc import BackoffIterator, Chunker, clean_input_id
 
 
 class Inputs(Lister, BaseClient):
-               user_id: str = None,
-               app_id: str = None,
-               logger_level: str = "INFO",
-               base_url: str = "https://api.clarifai.com",
-               pat: str = None,
-               token: str = None,
-               root_certificates_path: str = None,
-               **kwargs):
-    """Initializes an Input object.
-
-    Args:
-        user_id (str): A user ID for authentication.
-        app_id (str): An app ID for the application to interact with.
-        base_url (str): Base API url. Default "https://api.clarifai.com"
-        pat (str): A personal access token for authentication. Can be set as env var CLARIFAI_PAT
-        token (str): A session token for authentication. Accepts either a session token or a pat. Can be set as env var CLARIFAI_SESSION_TOKEN
-        root_certificates_path (str): Path to the SSL root certificates file, used to establish secure gRPC connections.
-        **kwargs: Additional keyword arguments to be passed to the Input
-    """
-    self.user_id = user_id
-    self.app_id = app_id
-    self.kwargs = {**kwargs}
-    self.input_info = resources_pb2.Input(**self.kwargs)
-    self.logger = logger
-    BaseClient.__init__(
+    """Inputs is a class that provides access to Clarifai API endpoints related to Input information."""
+
+    def __init__(
         self,
+        user_id: str = None,
+        app_id: str = None,
+        logger_level: str = "INFO",
+        base_url: str = "https://api.clarifai.com",
+        pat: str = None,
+        token: str = None,
+        root_certificates_path: str = None,
+        **kwargs,
+    ):
+        """Initializes an Input object.
+
+        Args:
+            user_id (str): A user ID for authentication.
+            app_id (str): An app ID for the application to interact with.
+            base_url (str): Base API url. Default "https://api.clarifai.com"
+            pat (str): A personal access token for authentication. Can be set as env var CLARIFAI_PAT
+            token (str): A session token for authentication. Accepts either a session token or a pat. Can be set as env var CLARIFAI_SESSION_TOKEN
+            root_certificates_path (str): Path to the SSL root certificates file, used to establish secure gRPC connections.
+            **kwargs: Additional keyword arguments to be passed to the Input
+        """
+        self.user_id = user_id
+        self.app_id = app_id
+        self.kwargs = {**kwargs}
+        self.input_info = resources_pb2.Input(**self.kwargs)
+        self.logger = logger
+        BaseClient.__init__(
+            self,
+            user_id=self.user_id,
+            app_id=self.app_id,
+            base=base_url,
+            pat=pat,
+            token=token,
+            root_certificates_path=root_certificates_path,
+        )
+        Lister.__init__(self)
+
+    @staticmethod
+    def _get_proto(
+        input_id: str,
+        dataset_id: str = None,
+        imagepb: Image = None,
+        video_pb: Video = None,
+        audio_pb: Audio = None,
+        text_pb: Text = None,
+        geo_info: List = None,
+        labels: List = None,
+        label_ids: List = None,
+        metadata: Struct = None,
+    ) -> Input:
+        """Create input proto for image data type.
         Args:
             input_id (str): The input ID for the input to create.
             dataset_id (str): The dataset ID for the dataset to add the input to.
@@ -89,1052 +94,1182 @@ class Inputs(Lister, BaseClient):
|
|
89
94
|
Returns:
|
90
95
|
Input: An Input object for the specified input ID.
|
91
96
|
"""
|
92
|
-
|
93
|
-
|
94
|
-
|
95
|
-
|
96
|
-
|
97
|
-
|
98
|
-
|
99
|
-
|
100
|
-
|
101
|
-
|
102
|
-
|
103
|
-
|
104
|
-
|
105
|
-
|
106
|
-
|
107
|
-
|
108
|
-
|
109
|
-
|
110
|
-
|
111
|
-
|
112
|
-
|
113
|
-
|
114
|
-
|
115
|
-
|
116
|
-
|
117
|
-
|
118
|
-
|
119
|
-
|
120
|
-
|
121
|
-
|
122
|
-
|
123
|
-
|
124
|
-
|
125
|
-
|
126
|
-
|
127
|
-
|
128
|
-
|
129
|
-
|
130
|
-
|
131
|
-
|
132
|
-
|
133
|
-
|
134
|
-
|
135
|
-
|
136
|
-
|
137
|
-
|
138
|
-
|
139
|
-
|
140
|
-
|
141
|
-
|
142
|
-
|
143
|
-
|
144
|
-
|
145
|
-
|
146
|
-
|
147
|
-
|
148
|
-
|
149
|
-
|
150
|
-
|
151
|
-
|
152
|
-
|
153
|
-
|
154
|
-
|
155
|
-
|
97
|
+
assert geo_info is None or isinstance(geo_info, list), (
|
98
|
+
"geo_info must be a list of longitude and latitude"
|
99
|
+
)
|
100
|
+
assert labels is None or isinstance(labels, list), "labels must be a list of strings"
|
101
|
+
assert label_ids is None or isinstance(label_ids, list), (
|
102
|
+
"label_ids must be a list of strings"
|
103
|
+
)
|
104
|
+
assert metadata is None or isinstance(metadata, Struct), "metadata must be a Struct"
|
105
|
+
geo_pb = (
|
106
|
+
resources_pb2.Geo(
|
107
|
+
geo_point=resources_pb2.GeoPoint(longitude=geo_info[0], latitude=geo_info[1])
|
108
|
+
)
|
109
|
+
if geo_info
|
110
|
+
else None
|
111
|
+
)
|
112
|
+
if labels:
|
113
|
+
if not label_ids:
|
114
|
+
concepts = [
|
115
|
+
resources_pb2.Concept(id=_label, name=_label, value=1.0) for _label in labels
|
116
|
+
]
|
117
|
+
else:
|
118
|
+
assert len(labels) == len(label_ids), (
|
119
|
+
"labels and label_ids must be of the same length"
|
120
|
+
)
|
121
|
+
concepts = [
|
122
|
+
resources_pb2.Concept(id=label_id, name=_label, value=1.0)
|
123
|
+
for label_id, _label in zip(label_ids, labels)
|
124
|
+
]
|
125
|
+
else:
|
126
|
+
concepts = None
|
127
|
+
|
128
|
+
if dataset_id:
|
129
|
+
return resources_pb2.Input(
|
130
|
+
id=input_id,
|
131
|
+
dataset_ids=[dataset_id],
|
132
|
+
data=resources_pb2.Data(
|
133
|
+
image=imagepb,
|
134
|
+
video=video_pb,
|
135
|
+
audio=audio_pb,
|
136
|
+
text=text_pb,
|
137
|
+
geo=geo_pb,
|
138
|
+
concepts=concepts,
|
139
|
+
metadata=metadata,
|
140
|
+
),
|
141
|
+
)
|
142
|
+
|
143
|
+
return resources_pb2.Input(
|
144
|
+
id=input_id,
|
145
|
+
data=resources_pb2.Data(
|
146
|
+
image=imagepb,
|
147
|
+
video=video_pb,
|
148
|
+
audio=audio_pb,
|
149
|
+
text=text_pb,
|
150
|
+
geo=geo_pb,
|
151
|
+
concepts=concepts,
|
152
|
+
metadata=metadata,
|
153
|
+
),
|
154
|
+
)
|
155
|
+
|
156
|
+
@staticmethod
|
157
|
+
def get_input_from_url(
|
158
|
+
input_id: str,
|
159
|
+
image_url: str = None,
|
160
|
+
video_url: str = None,
|
161
|
+
audio_url: str = None,
|
162
|
+
text_url: str = None,
|
163
|
+
dataset_id: str = None,
|
164
|
+
**kwargs,
|
165
|
+
) -> Input:
|
166
|
+
"""Create input proto from url.
|
167
|
+
|
168
|
+
Args:
|
169
|
+
input_id (str): The input ID for the input to create.
|
170
|
+
image_url (str): The url for the image.
|
171
|
+
video_url (str): The url for the video.
|
172
|
+
audio_url (str): The url for the audio.
|
173
|
+
text_url (str): The url for the text.
|
174
|
+
dataset_id (str): The dataset ID for the dataset to add the input to.
|
175
|
+
|
176
|
+
Returns:
|
177
|
+
Input: An Input object for the specified input ID.
|
178
|
+
|
179
|
+
Example:
|
180
|
+
>>> from clarifai.client.input import Inputs
|
181
|
+
>>> input_proto = Inputs.get_input_from_url(input_id = 'demo', image_url='https://samples.clarifai.com/metro-north.jpg')
|
182
|
+
"""
|
183
|
+
if not any((image_url, video_url, audio_url, text_url)):
|
184
|
+
raise ValueError(
|
185
|
+
"At least one of image_url, video_url, audio_url, text_url must be provided."
|
186
|
+
)
|
187
|
+
image_pb = resources_pb2.Image(url=image_url) if image_url else None
|
188
|
+
video_pb = resources_pb2.Video(url=video_url) if video_url else None
|
189
|
+
audio_pb = resources_pb2.Audio(url=audio_url) if audio_url else None
|
190
|
+
text_pb = resources_pb2.Text(url=text_url) if text_url else None
|
191
|
+
return Inputs._get_proto(
|
192
|
+
input_id=input_id,
|
193
|
+
dataset_id=dataset_id,
|
194
|
+
imagepb=image_pb,
|
195
|
+
video_pb=video_pb,
|
196
|
+
audio_pb=audio_pb,
|
197
|
+
text_pb=text_pb,
|
198
|
+
**kwargs,
|
199
|
+
)
|
200
|
+
|
201
|
+
@staticmethod
|
202
|
+
def get_input_from_file(
|
203
|
+
input_id: str,
|
204
|
+
image_file: str = None,
|
205
|
+
video_file: str = None,
|
206
|
+
audio_file: str = None,
|
207
|
+
text_file: str = None,
|
208
|
+
dataset_id: str = None,
|
209
|
+
**kwargs,
|
210
|
+
) -> Input:
|
211
|
+
"""Create input proto from files.
|
212
|
+
|
213
|
+
Args:
|
214
|
+
input_id (str): The input ID for the input to create.
|
215
|
+
image_file (str): The file_path for the image.
|
216
|
+
video_file (str): The file_path for the video.
|
217
|
+
audio_file (str): The file_path for the audio.
|
218
|
+
text_file (str): The file_path for the text.
|
156
219
|
dataset_id (str): The dataset ID for the dataset to add the input to.
|
157
220
|
|
158
|
-
|
159
|
-
|
160
|
-
|
161
|
-
|
162
|
-
|
163
|
-
|
164
|
-
|
165
|
-
|
166
|
-
|
167
|
-
|
168
|
-
|
169
|
-
|
170
|
-
|
171
|
-
|
172
|
-
|
173
|
-
|
174
|
-
|
175
|
-
|
176
|
-
|
177
|
-
|
178
|
-
text_pb=
|
179
|
-
|
180
|
-
|
181
|
-
|
182
|
-
|
183
|
-
|
184
|
-
|
185
|
-
|
186
|
-
|
187
|
-
|
188
|
-
|
189
|
-
|
190
|
-
|
191
|
-
|
192
|
-
|
193
|
-
|
194
|
-
|
195
|
-
|
196
|
-
|
197
|
-
|
198
|
-
|
199
|
-
|
200
|
-
|
201
|
-
|
202
|
-
|
203
|
-
|
204
|
-
|
205
|
-
|
206
|
-
|
207
|
-
|
208
|
-
|
209
|
-
|
210
|
-
|
211
|
-
|
212
|
-
|
213
|
-
|
214
|
-
|
215
|
-
|
216
|
-
|
217
|
-
|
218
|
-
|
219
|
-
|
220
|
-
|
221
|
-
|
222
|
-
|
223
|
-
|
224
|
-
|
225
|
-
|
226
|
-
|
227
|
-
|
228
|
-
|
229
|
-
|
230
|
-
|
231
|
-
|
232
|
-
|
233
|
-
|
234
|
-
|
235
|
-
|
236
|
-
|
237
|
-
|
238
|
-
|
239
|
-
|
240
|
-
|
241
|
-
|
242
|
-
|
243
|
-
|
244
|
-
|
245
|
-
|
246
|
-
|
247
|
-
|
248
|
-
|
249
|
-
|
250
|
-
|
251
|
-
|
252
|
-
|
253
|
-
|
254
|
-
|
255
|
-
|
256
|
-
|
257
|
-
|
258
|
-
|
259
|
-
|
260
|
-
|
261
|
-
|
262
|
-
|
263
|
-
|
264
|
-
|
265
|
-
|
266
|
-
|
267
|
-
|
268
|
-
|
269
|
-
|
270
|
-
|
271
|
-
|
272
|
-
|
273
|
-
|
274
|
-
|
275
|
-
|
276
|
-
|
277
|
-
|
278
|
-
|
279
|
-
|
280
|
-
|
281
|
-
|
282
|
-
|
283
|
-
|
284
|
-
|
285
|
-
|
286
|
-
|
287
|
-
|
288
|
-
|
289
|
-
|
290
|
-
|
291
|
-
|
292
|
-
|
293
|
-
|
294
|
-
|
295
|
-
|
296
|
-
|
297
|
-
|
298
|
-
|
299
|
-
|
300
|
-
|
301
|
-
|
302
|
-
|
303
|
-
|
304
|
-
|
305
|
-
|
306
|
-
|
307
|
-
|
308
|
-
|
309
|
-
|
310
|
-
|
311
|
-
|
312
|
-
|
313
|
-
|
314
|
-
|
315
|
-
|
316
|
-
|
317
|
-
|
318
|
-
|
319
|
-
|
320
|
-
|
321
|
-
|
322
|
-
|
323
|
-
|
324
|
-
|
325
|
-
|
326
|
-
|
327
|
-
|
328
|
-
|
329
|
-
|
330
|
-
|
331
|
-
|
332
|
-
|
333
|
-
|
334
|
-
|
335
|
-
|
336
|
-
|
337
|
-
|
338
|
-
|
339
|
-
|
340
|
-
|
341
|
-
|
342
|
-
|
343
|
-
|
344
|
-
|
345
|
-
|
346
|
-
|
347
|
-
|
348
|
-
|
349
|
-
|
350
|
-
|
351
|
-
|
352
|
-
|
353
|
-
|
354
|
-
|
355
|
-
|
356
|
-
|
357
|
-
|
358
|
-
|
359
|
-
|
360
|
-
|
361
|
-
|
362
|
-
|
363
|
-
|
364
|
-
|
365
|
-
|
366
|
-
|
367
|
-
|
368
|
-
|
369
|
-
|
370
|
-
|
371
|
-
|
372
|
-
|
373
|
-
|
374
|
-
|
375
|
-
|
376
|
-
|
377
|
-
|
378
|
-
|
379
|
-
|
380
|
-
|
381
|
-
|
382
|
-
|
383
|
-
|
384
|
-
|
385
|
-
|
221
|
+
Returns:
|
222
|
+
Input: An Input object for the specified input ID.
|
223
|
+
|
224
|
+
Example:
|
225
|
+
>>> from clarifai.client.input import Inputs
|
226
|
+
>>> input_proto = Inputs.get_input_from_file(input_id = 'demo', video_file='file_path')
|
227
|
+
"""
|
228
|
+
if not any((image_file, video_file, audio_file, text_file)):
|
229
|
+
raise ValueError(
|
230
|
+
"At least one of image_file, video_file, audio_file, text_file must be provided."
|
231
|
+
)
|
232
|
+
image_pb = (
|
233
|
+
resources_pb2.Image(base64=open(image_file, 'rb').read()) if image_file else None
|
234
|
+
)
|
235
|
+
video_pb = (
|
236
|
+
resources_pb2.Video(base64=open(video_file, 'rb').read()) if video_file else None
|
237
|
+
)
|
238
|
+
audio_pb = (
|
239
|
+
resources_pb2.Audio(base64=open(audio_file, 'rb').read()) if audio_file else None
|
240
|
+
)
|
241
|
+
text_pb = resources_pb2.Text(raw=open(text_file, 'rb').read()) if text_file else None
|
242
|
+
return Inputs._get_proto(
|
243
|
+
input_id=input_id,
|
244
|
+
dataset_id=dataset_id,
|
245
|
+
imagepb=image_pb,
|
246
|
+
video_pb=video_pb,
|
247
|
+
audio_pb=audio_pb,
|
248
|
+
text_pb=text_pb,
|
249
|
+
**kwargs,
|
250
|
+
)
|
251
|
+
|
252
|
+
@staticmethod
|
253
|
+
def get_input_from_bytes(
|
254
|
+
input_id: str,
|
255
|
+
image_bytes: bytes = None,
|
256
|
+
video_bytes: bytes = None,
|
257
|
+
audio_bytes: bytes = None,
|
258
|
+
text_bytes: bytes = None,
|
259
|
+
dataset_id: str = None,
|
260
|
+
**kwargs,
|
261
|
+
) -> Input:
|
262
|
+
"""Create input proto from bytes.
|
263
|
+
|
264
|
+
Args:
|
265
|
+
input_id (str): The input ID for the input to create.
|
266
|
+
image_bytes (str): The bytes for the image.
|
267
|
+
video_bytes (str): The bytes for the video.
|
268
|
+
audio_bytes (str): The bytes for the audio.
|
269
|
+
text_bytes (str): The bytes for the text.
|
270
|
+
dataset_id (str): The dataset ID for the dataset to add the input to.
|
271
|
+
|
272
|
+
Returns:
|
273
|
+
Input: An Input object for the specified input ID.
|
274
|
+
|
275
|
+
Example:
|
276
|
+
>>> from clarifai.client.input import Inputs
|
277
|
+
>>> image = open('demo.jpg', 'rb').read()
|
278
|
+
>>> video = open('demo.mp4', 'rb').read()
|
279
|
+
>>> input_proto = Inputs.get_input_from_bytes(input_id = 'demo',image_bytes =image, video_bytes=video)
|
280
|
+
"""
|
281
|
+
if not any((image_bytes, video_bytes, audio_bytes, text_bytes)):
|
282
|
+
raise ValueError(
|
283
|
+
"At least one of image_bytes, video_bytes, audio_bytes, text_bytes must be provided."
|
284
|
+
)
|
285
|
+
image_pb = resources_pb2.Image(base64=image_bytes) if image_bytes else None
|
286
|
+
video_pb = resources_pb2.Video(base64=video_bytes) if video_bytes else None
|
287
|
+
audio_pb = resources_pb2.Audio(base64=audio_bytes) if audio_bytes else None
|
288
|
+
text_pb = resources_pb2.Text(raw=text_bytes) if text_bytes else None
|
289
|
+
return Inputs._get_proto(
|
290
|
+
input_id=input_id,
|
291
|
+
dataset_id=dataset_id,
|
292
|
+
imagepb=image_pb,
|
293
|
+
video_pb=video_pb,
|
294
|
+
audio_pb=audio_pb,
|
295
|
+
text_pb=text_pb,
|
296
|
+
**kwargs,
|
297
|
+
)
|
298
|
+
|
299
|
+
@staticmethod
|
300
|
+
def get_image_inputs_from_folder(
|
301
|
+
folder_path: str, dataset_id: str = None, labels: bool = False
|
302
|
+
) -> List[Input]: # image specific
|
303
|
+
"""Create input protos for image data type from folder.
|
304
|
+
|
305
|
+
Args:
|
306
|
+
folder_path (str): Path to the folder containing images.
|
307
|
+
|
308
|
+
Returns:
|
309
|
+
list of Input: A list of Input objects for the specified folder.
|
310
|
+
|
311
|
+
Example:
|
312
|
+
>>> from clarifai.client.input import Inputs
|
313
|
+
>>> input_protos = Inputs.get_image_inputs_from_folder(folder_path='demo_folder')
|
314
|
+
"""
|
315
|
+
input_protos = []
|
316
|
+
labels = [folder_path.split('/')[-1]] if labels else None
|
317
|
+
for filename in os.listdir(folder_path):
|
318
|
+
if filename.split('.')[-1] not in ['jpg', 'jpeg', 'png', 'tiff', 'webp']:
|
319
|
+
continue
|
320
|
+
input_id = clean_input_id(filename.split('.')[0])
|
321
|
+
image_pb = resources_pb2.Image(
|
322
|
+
base64=open(os.path.join(folder_path, filename), 'rb').read()
|
323
|
+
)
|
324
|
+
input_protos.append(
|
325
|
+
Inputs._get_proto(
|
326
|
+
input_id=input_id, dataset_id=dataset_id, imagepb=image_pb, labels=labels
|
327
|
+
)
|
328
|
+
)
|
329
|
+
return input_protos
|
330
|
+
|
331
|
+
@staticmethod
|
332
|
+
def get_text_input(
|
333
|
+
input_id: str, raw_text: str, dataset_id: str = None, **kwargs
|
334
|
+
) -> Text: # text specific
|
335
|
+
"""Create input proto for text data type from raw text.
|
336
|
+
|
337
|
+
Args:
|
338
|
+
input_id (str): The input ID for the input to create.
|
339
|
+
raw_text (str): The raw text input.
|
340
|
+
dataset_id (str): The dataset ID for the dataset to add the input to.
|
341
|
+
**kwargs: Additional keyword arguments to be passed to the Input
|
342
|
+
|
343
|
+
Returns:
|
344
|
+
Text: An Input object for the specified input ID.
|
345
|
+
|
346
|
+
Example:
|
347
|
+
>>> from clarifai.client.input import Inputs
|
348
|
+
>>> input_protos = Inputs.get_text_input(input_id = 'demo', raw_text = 'This is a test')
|
349
|
+
"""
|
350
|
+
text_pb = resources_pb2.Text(raw=raw_text)
|
351
|
+
return Inputs._get_proto(
|
352
|
+
input_id=input_id, dataset_id=dataset_id, text_pb=text_pb, **kwargs
|
353
|
+
)
|
354
|
+
|
355
|
+
@staticmethod
|
356
|
+
def get_multimodal_input(
|
357
|
+
input_id: str,
|
358
|
+
raw_text: str = None,
|
359
|
+
text_bytes: bytes = None,
|
360
|
+
image_url: str = None,
|
361
|
+
image_bytes: bytes = None,
|
362
|
+
dataset_id: str = None,
|
363
|
+
**kwargs,
|
364
|
+
) -> Text:
|
365
|
+
"""Create input proto for text and image from bytes or url.
|
366
|
+
|
367
|
+
Args:
|
368
|
+
input_id (str): The input ID for the input to create.
|
369
|
+
raw_text (str): The raw text input.
|
370
|
+
text_bytes (str): The bytes for the text.
|
371
|
+
image_url (str): The url for the image.
|
372
|
+
image_bytes (str): The bytes for the image.
|
373
|
+
dataset_id (str): The dataset ID for the dataset to add the input to.
|
374
|
+
**kwargs: Additional keyword arguments to be passed to the Input
|
375
|
+
|
376
|
+
Returns:
|
377
|
+
Input: An Input object for the specified input ID.
|
378
|
+
|
379
|
+
Example:
|
380
|
+
>>> from clarifai.client.input import Inputs
|
381
|
+
>>> input_protos = Inputs.get_multimodal_input(input_id = 'demo', raw_text = 'What time of day is it?', image_url='https://samples.clarifai.com/metro-north.jpg')
|
382
|
+
"""
|
383
|
+
if (image_bytes and image_url) or (not image_bytes and not image_url):
|
384
|
+
return UserError("Please supply only one of image_bytes or image_url, and not both.")
|
385
|
+
if (text_bytes and raw_text) or (not text_bytes and not raw_text):
|
386
|
+
return UserError("Please supply only one of text_bytes or raw_text, and not both.")
|
387
|
+
|
388
|
+
image_pb = (
|
389
|
+
resources_pb2.Image(base64=image_bytes)
|
390
|
+
if image_bytes
|
391
|
+
else resources_pb2.Image(url=image_url)
|
392
|
+
if image_url
|
393
|
+
else None
|
394
|
+
)
|
395
|
+
text_pb = (
|
396
|
+
resources_pb2.Text(raw=text_bytes)
|
397
|
+
if text_bytes
|
398
|
+
else resources_pb2.Text(raw=raw_text)
|
399
|
+
if raw_text
|
400
|
+
else None
|
401
|
+
)
|
402
|
+
return Inputs._get_proto(
|
403
|
+
input_id=input_id, dataset_id=dataset_id, imagepb=image_pb, text_pb=text_pb, **kwargs
|
404
|
+
)
|
405
|
+
|
406
|
+
@staticmethod
|
407
|
+
def get_inputs_from_csv(
|
408
|
+
csv_path: str,
|
409
|
+
input_type: str = 'text',
|
410
|
+
csv_type: str = 'raw',
|
411
|
+
dataset_id: str = None,
|
412
|
+
labels: str = True,
|
413
|
+
) -> List[Text]:
|
414
|
+
"""Create input protos from csv.
|
415
|
+
|
416
|
+
Args:
|
417
|
+
csv_path (str): Path to the csv file.
|
418
|
+
input_type (str): Type of input. Options: 'text', 'image', 'video', 'audio'.
|
419
|
+
csv_type (str): Type of csv file. Options: 'raw', 'url', 'file_path'.
|
420
|
+
dataset_id (str): The dataset ID for the dataset to add the input to.
|
421
|
+
labels (str): True if csv file has labels column.
|
422
|
+
|
423
|
+
Returns:
|
424
|
+
inputs: List of inputs
|
425
|
+
|
426
|
+
Example:
|
427
|
+
>>> from clarifai.client.input import Inputs
|
428
|
+
>>> input_protos = Inputs.get_inputs_from_csv(csv_path='filepath', input_type='text', csv_type='raw')
|
429
|
+
"""
|
430
|
+
input_protos = []
|
431
|
+
with open(csv_path) as _file:
|
432
|
+
reader = csv.DictReader(_file, delimiter=',', quotechar='"')
|
433
|
+
columns = reader.fieldnames
|
434
|
+
for column in columns:
|
435
|
+
if column not in ['inputid', 'input', 'concepts', 'metadata', 'geopoints']:
|
436
|
+
raise UserError(
|
437
|
+
"CSV file may have 'inputid', 'input', 'concepts', 'metadata', 'geopoints' columns. Does not support '{}' column".format(
|
438
|
+
column
|
439
|
+
)
|
440
|
+
)
|
441
|
+
for id, input in enumerate(reader):
|
442
|
+
if labels:
|
443
|
+
labels_list = input['concepts'].split(',')
|
444
|
+
labels = labels_list if len(input['concepts']) > 0 else None
|
445
|
+
else:
|
446
|
+
labels = None
|
447
|
+
|
448
|
+
if 'metadata' in columns:
|
449
|
+
if len(input['metadata']) > 0:
|
450
|
+
metadata_str = input['metadata'].replace("'", '"')
|
451
|
+
try:
|
452
|
+
metadata_dict = json.loads(metadata_str)
|
453
|
+
except json.decoder.JSONDecodeError:
|
454
|
+
raise UserError("metadata column in CSV file should be a valid json")
|
455
|
+
metadata = Struct()
|
456
|
+
metadata.update(metadata_dict)
|
457
|
+
else:
|
458
|
+
metadata = None
|
459
|
+
else:
|
460
|
+
metadata = None
|
461
|
+
|
462
|
+
if 'geopoints' in columns:
|
463
|
+
if len(input['geopoints']) > 0:
|
464
|
+
geo_points = input['geopoints'].split(',')
|
465
|
+
geo_points = [float(geo_point) for geo_point in geo_points]
|
466
|
+
geo_info = (
|
467
|
+
geo_points
|
468
|
+
if len(geo_points) == 2
|
469
|
+
else UserError(
|
470
|
+
"geopoints column in CSV file should have longitude,latitude"
|
471
|
+
)
|
472
|
+
)
|
473
|
+
else:
|
474
|
+
geo_info = None
|
475
|
+
else:
|
476
|
+
geo_info = None
|
477
|
+
|
478
|
+
input_id = input['inputid'] if 'inputid' in columns else uuid.uuid4().hex
|
479
|
+
text = input['input'] if input_type == 'text' else None
|
480
|
+
image = input['input'] if input_type == 'image' else None
|
481
|
+
video = input['input'] if input_type == 'video' else None
|
482
|
+
audio = input['input'] if input_type == 'audio' else None
|
483
|
+
|
484
|
+
if csv_type == 'raw':
|
485
|
+
input_protos.append(
|
486
|
+
Inputs.get_text_input(
|
487
|
+
input_id=input_id,
|
488
|
+
raw_text=text,
|
489
|
+
dataset_id=dataset_id,
|
490
|
+
labels=labels,
|
491
|
+
metadata=metadata,
|
492
|
+
geo_info=geo_info,
|
493
|
+
)
|
494
|
+
)
|
495
|
+
elif csv_type == 'url':
|
496
|
+
input_protos.append(
|
497
|
+
Inputs.get_input_from_url(
|
498
|
+
input_id=input_id,
|
499
|
+
image_url=image,
|
500
|
+
text_url=text,
|
501
|
+
audio_url=audio,
|
502
|
+
video_url=video,
|
503
|
+
dataset_id=dataset_id,
|
504
|
+
labels=labels,
|
505
|
+
metadata=metadata,
|
506
|
+
geo_info=geo_info,
|
507
|
+
)
|
508
|
+
)
|
509
|
+
else:
|
510
|
+
input_protos.append(
|
511
|
+
Inputs.get_input_from_file(
|
512
|
+
input_id=input_id,
|
513
|
+
image_file=image,
|
514
|
+
text_file=text,
|
515
|
+
audio_file=audio,
|
516
|
+
video_file=video,
|
517
|
+
dataset_id=dataset_id,
|
518
|
+
labels=labels,
|
519
|
+
metadata=metadata,
|
520
|
+
geo_info=geo_info,
|
521
|
+
)
|
522
|
+
)
|
523
|
+
|
524
|
+
return input_protos
|
525
|
+
|
526
|
+
@staticmethod
|
527
|
+
def get_text_inputs_from_folder(
|
528
|
+
folder_path: str, dataset_id: str = None, labels: bool = False
|
529
|
+
) -> List[Text]: # text specific
|
530
|
+
"""Create input protos for text data type from folder.
|
531
|
+
|
532
|
+
Args:
|
533
|
+
folder_path (str): Path to the folder containing text.
|
534
|
+
|
535
|
+
Returns:
|
536
|
+
list of Input: A list of Input objects for the specified folder.
|
537
|
+
|
538
|
+
Example:
|
539
|
+
>>> from clarifai.client.input import Inputs
|
540
|
+
>>> input_protos = Inputs.get_text_inputs_from_folder(folder_path='demo_folder')
|
541
|
+
"""
|
542
|
+
input_protos = []
|
543
|
+
labels = [folder_path.split('/')[-1]] if labels else None
|
544
|
+
for filename in os.listdir(folder_path):
|
545
|
+
if filename.split('.')[-1] != 'txt':
|
546
|
+
continue
|
547
|
+
input_id = clean_input_id(filename.split('.')[0])
|
548
|
+
text_pb = resources_pb2.Text(
|
549
|
+
raw=open(os.path.join(folder_path, filename), 'rb').read()
|
550
|
+
)
|
551
|
+
input_protos.append(
|
552
|
+
Inputs._get_proto(
|
553
|
+
input_id=input_id, dataset_id=dataset_id, text_pb=text_pb, labels=labels
|
554
|
+
)
|
555
|
+
)
|
556
|
+
return input_protos
|
557
|
+
|
558
|
+
@staticmethod
|
559
|
+
def get_bbox_proto(
|
560
|
+
input_id: str, label: str, bbox: List, label_id: str = None, annot_id: str = None
|
561
|
+
) -> Annotation:
|
562
|
+
"""Create an annotation proto for each bounding box, label input pair.
|
563
|
+
|
564
|
+
Args:
|
565
|
+
input_id (str): The input ID for the annotation to create.
|
566
|
+
label (str): annotation label name
|
567
|
+
bbox (List): a list of a single bbox's coordinates. # bbox ordering: [xmin, ymin, xmax, ymax]
|
568
|
+
label_id (str): annotation label ID
|
569
|
+
annot_id (str): annotation ID
|
570
|
+
|
571
|
+
Returns:
|
572
|
+
An annotation object for the specified input ID.
|
573
|
+
|
574
|
+
Example:
|
575
|
+
>>> from clarifai.client.input import Inputs
|
576
|
+
>>> Inputs.get_bbox_proto(input_id='demo', label='demo', bbox=[x_min, y_min, x_max, y_max])
|
577
|
+
"""
|
578
|
+
if not isinstance(bbox, list):
|
579
|
+
raise UserError("must be a list of bbox cooridnates")
|
580
|
+
annot_data = resources_pb2.Data(
|
581
|
+
regions=[
|
582
|
+
resources_pb2.Region(
|
583
|
+
region_info=resources_pb2.RegionInfo(
|
584
|
+
bounding_box=resources_pb2.BoundingBox(
|
585
|
+
# bbox ordering: [xmin, ymin, xmax, ymax]
|
586
|
+
# top_row must be less than bottom row
|
587
|
+
# left_col must be less than right col
|
588
|
+
top_row=bbox[1], # y_min
|
589
|
+
left_col=bbox[0], # x_min
|
590
|
+
bottom_row=bbox[3], # y_max
|
591
|
+
right_col=bbox[2], # x_max
|
592
|
+
)
|
593
|
+
),
|
594
|
+
data=resources_pb2.Data(
|
595
|
+
concepts=[
|
596
|
+
resources_pb2.Concept(id=label, name=label, value=1.0)
|
597
|
+
if not label_id
|
598
|
+
else resources_pb2.Concept(id=label_id, name=label, value=1.0)
|
599
|
+
]
|
600
|
+
),
|
601
|
+
)
|
602
|
+
]
|
603
|
+
)
|
604
|
+
if annot_id:
|
605
|
+
input_annot_proto = resources_pb2.Annotation(
|
606
|
+
id=annot_id, input_id=input_id, data=annot_data
|
607
|
+
)
|
386
608
|
else:
|
387
|
-
|
609
|
+
input_annot_proto = resources_pb2.Annotation(input_id=input_id, data=annot_data)
|
610
|
+
|
611
|
+
return input_annot_proto
|
612
|
+
|
613
|
+
@staticmethod
|
614
|
+
def get_mask_proto(
|
615
|
+
input_id: str,
|
616
|
+
label: str,
|
617
|
+
polygons: List[List[float]],
|
618
|
+
label_id: str = None,
|
619
|
+
annot_id: str = None,
|
620
|
+
) -> Annotation:
|
621
|
+
"""Create an annotation proto for each polygon box, label input pair.
|
622
|
+
|
623
|
+
Args:
|
624
|
+
input_id (str): The input ID for the annotation to create.
|
625
|
+
label (str): annotation label name
|
626
|
+
polygons (List): Polygon x,y points iterable
|
627
|
+
label_id (str): annotation label ID
|
628
|
+
annot_id (str): annotation ID
|
629
|
+
|
630
|
+
Returns:
|
631
|
+
An annotation object for the specified input ID.
|
388
632
|
|
389
|
-
|
390
|
-
|
391
|
-
|
633
|
+
Example:
|
634
|
+
>>> from clarifai.client.input import Inputs
|
635
|
+
>>> Inputs.get_mask_proto(input_id='demo', label='demo', polygons=[[[x,y],...,[x,y]],...])
|
636
|
+
"""
|
637
|
+
if not isinstance(polygons, list):
|
638
|
+
raise UserError("polygons must be a list of points")
|
639
|
+
annot_data = resources_pb2.Data(
|
640
|
+
regions=[
|
641
|
+
resources_pb2.Region(
|
642
|
+
region_info=resources_pb2.RegionInfo(
|
643
|
+
polygon=resources_pb2.Polygon(
|
644
|
+
points=[
|
645
|
+
resources_pb2.Point(
|
646
|
+
row=_point[1], # row is y point
|
647
|
+
col=_point[0], # col is x point
|
648
|
+
visibility="VISIBLE",
|
649
|
+
)
|
650
|
+
for _point in polygons
|
651
|
+
]
|
652
|
+
)
|
653
|
+
),
|
654
|
+
data=resources_pb2.Data(
|
655
|
+
concepts=[
|
656
|
+
resources_pb2.Concept(id=label, name=label, value=1.0)
|
657
|
+
if not label_id
|
658
|
+
else resources_pb2.Concept(id=label_id, name=label, value=1.0)
|
659
|
+
]
|
660
|
+
),
|
661
|
+
)
|
662
|
+
]
|
663
|
+
)
|
664
|
+
if annot_id:
|
665
|
+
input_mask_proto = resources_pb2.Annotation(
|
666
|
+
id=annot_id, input_id=input_id, data=annot_data
|
667
|
+
)
|
668
|
+
else:
|
669
|
+
input_mask_proto = resources_pb2.Annotation(input_id=input_id, data=annot_data)
|
670
|
+
|
671
|
+
return input_mask_proto
|
672
|
+
|
673
|
+
def get_input(self, input_id: str) -> Input:
|
674
|
+
"""Get Input object of input with input_id provided from the app.
|
675
|
+
|
676
|
+
Args:
|
677
|
+
input_id (str): The input ID for the annotation to get.
|
678
|
+
|
679
|
+
Returns:
|
680
|
+
Input: An Input object for the specified input ID.
|
681
|
+
|
682
|
+
Example:
|
683
|
+
>>> from clarifai.client.input import Inputs
|
684
|
+
>>> input_obj = Inputs(user_id = 'user_id', app_id = 'demo_app')
|
685
|
+
>>> input_obj.get_input(input_id='demo')
|
686
|
+
"""
|
687
|
+
request = service_pb2.GetInputRequest(user_app_id=self.user_app_id, input_id=input_id)
|
688
|
+
response = self._grpc_request(self.STUB.GetInput, request)
|
689
|
+
return response.input
|
690
|
+
|
691
|
+
def upload_from_url(
|
692
|
+
self,
|
693
|
+
input_id: str,
|
694
|
+
image_url: str = None,
|
695
|
+
video_url: str = None,
|
696
|
+
audio_url: str = None,
|
697
|
+
text_url: str = None,
|
698
|
+
dataset_id: str = None,
|
699
|
+
**kwargs,
|
700
|
+
) -> str:
|
701
|
+
"""Upload input from url.
|
702
|
+
|
703
|
+
Args:
|
704
|
+
input_id (str): The input ID for the input to create.
|
705
|
+
image_url (str): The url for the image.
|
706
|
+
video_url (str): The url for the video.
|
707
|
+
audio_url (str): The url for the audio.
|
708
|
+
text_url (str): The url for the text.
|
709
|
+
dataset_id (str): The dataset ID for the dataset to add the input to.
|
710
|
+
|
711
|
+
Returns:
|
712
|
+
input_job_id: job id for the upload request.
|
713
|
+
|
714
|
+
Example:
|
715
|
+
>>> from clarifai.client.input import Inputs
|
716
|
+
>>> input_obj = Inputs(user_id = 'user_id', app_id = 'demo_app')
|
717
|
+
>>> input_obj.upload_from_url(input_id='demo', image_url='https://samples.clarifai.com/metro-north.jpg')
|
718
|
+
"""
|
719
|
+
input_pb = self.get_input_from_url(
|
720
|
+
input_id, image_url, video_url, audio_url, text_url, dataset_id, **kwargs
|
721
|
+
)
|
722
|
+
return self.upload_inputs([input_pb])
|
723
|
+
|
724
|
+
def upload_from_file(
|
725
|
+
self,
|
726
|
+
input_id: str,
|
727
|
+
image_file: str = None,
|
728
|
+
video_file: str = None,
|
729
|
+
audio_file: str = None,
|
730
|
+
text_file: str = None,
|
731
|
+
dataset_id: str = None,
|
732
|
+
**kwargs,
|
733
|
+
) -> str:
|
734
|
+
"""Upload input from file.
|
735
|
+
|
736
|
+
Args:
|
737
|
+
input_id (str): The input ID for the input to create.
|
738
|
+
image_file (str): The file for the image.
|
739
|
+
video_file (str): The file for the video.
|
740
|
+
audio_file (str): The file for the audio.
|
741
|
+
text_file (str): The file for the text.
|
742
|
+
dataset_id (str): The dataset ID for the dataset to add the input to.
|
743
|
+
|
744
|
+
Returns:
|
745
|
+
input_job_id: job id for the upload request.
|
746
|
+
|
747
|
+
Example:
|
748
|
+
>>> from clarifai.client.input import Inputs
|
749
|
+
>>> input_obj = Inputs(user_id = 'user_id', app_id = 'demo_app')
|
750
|
+
>>> input_obj.upload_from_file(input_id='demo', audio_file='demo.mp3')
|
751
|
+
"""
|
752
|
+
input_pb = self.get_input_from_file(
|
753
|
+
input_id, image_file, video_file, audio_file, text_file, dataset_id, **kwargs
|
754
|
+
)
|
755
|
+
return self.upload_inputs([input_pb])
|
756
|
+
|
757
|
+
def upload_from_bytes(
|
758
|
+
self,
|
759
|
+
input_id: str,
|
760
|
+
image_bytes: bytes = None,
|
761
|
+
video_bytes: bytes = None,
|
762
|
+
audio_bytes: bytes = None,
|
763
|
+
text_bytes: bytes = None,
|
764
|
+
dataset_id: str = None,
|
765
|
+
**kwargs,
|
766
|
+
) -> str:
|
767
|
+
"""Upload input from bytes.
|
768
|
+
|
769
|
+
Args:
|
770
|
+
input_id (str): The input ID for the input to create.
|
771
|
+
image_bytes (str): The bytes for the image.
|
772
|
+
video_bytes (str): The bytes for the video.
|
773
|
+
audio_bytes (str): The bytes for the audio.
|
774
|
+
text_bytes (str): The bytes for the text.
|
775
|
+
dataset_id (str): The dataset ID for the dataset to add the input to.
|
776
|
+
|
777
|
+
Returns:
|
778
|
+
input_job_id: job id for the upload request.
|
779
|
+
|
780
|
+
Example:
|
781
|
+
>>> from clarifai.client.input import Inputs
|
782
|
+
>>> input_obj = Inputs(user_id = 'user_id', app_id = 'demo_app')
|
783
|
+
>>> image = open('demo.jpg', 'rb').read()
|
784
|
+
>>> input_obj.upload_from_bytes(input_id='demo', image_bytes=image)
|
785
|
+
"""
|
786
|
+
input_pb = self.get_input_from_bytes(
|
787
|
+
input_id, image_bytes, video_bytes, audio_bytes, text_bytes, dataset_id, **kwargs
|
788
|
+
)
|
789
|
+
return self.upload_inputs([input_pb])
|
790
|
+
|
791
|
+
def upload_text(
|
792
|
+
self, input_id: str, raw_text: str, dataset_id: str = None, **kwargs
|
793
|
+
) -> str: # text specific
|
794
|
+
"""Upload text from raw text.
|
795
|
+
|
796
|
+
Args:
|
797
|
+
input_id (str): The input ID for the input to create.
|
798
|
+
raw_text (str): The raw text.
|
799
|
+
dataset_id (str): The dataset ID for the dataset to add the input to.
|
800
|
+
|
801
|
+
Returns:
|
802
|
+
input_job_id (str): job id for the upload request.
|
803
|
+
|
804
|
+
Example:
|
805
|
+
>>> from clarifai.client.input import Inputs
|
806
|
+
>>> input_obj = Inputs(user_id = 'user_id', app_id = 'demo_app')
|
807
|
+
>>> input_obj.upload_text(input_id = 'demo', raw_text = 'This is a test')
|
808
|
+
"""
|
809
|
+
input_pb = self._get_proto(
|
810
|
+
input_id=input_id,
|
811
|
+
dataset_id=dataset_id,
|
812
|
+
text_pb=resources_pb2.Text(raw=raw_text),
|
813
|
+
**kwargs,
|
814
|
+
)
|
815
|
+
return self.upload_inputs([input_pb])
|
816
|
+
|
817
|
+
def upload_inputs(self, inputs: List[Input], show_log: bool = True) -> str:
|
818
|
+
"""Upload list of input objects to the app.
|
819
|
+
|
820
|
+
Args:
|
821
|
+
inputs (list): List of input objects to upload.
|
822
|
+
show_log (bool): Show upload status log.
|
823
|
+
|
824
|
+
Returns:
|
825
|
+
input_job_id: job id for the upload request.
|
826
|
+
"""
|
827
|
+
if not isinstance(inputs, list):
|
828
|
+
raise UserError("inputs must be a list of Input objects")
|
829
|
+
if len(inputs) > MAX_UPLOAD_BATCH_SIZE:
|
830
|
+
raise UserError(
|
831
|
+
f"Number of inputs to upload exceeds the maximum batch size of {MAX_UPLOAD_BATCH_SIZE}. Please reduce batch size."
|
832
|
+
)
|
833
|
+
input_job_id = uuid.uuid4().hex # generate a unique id for this job
|
834
|
+
request = service_pb2.PostInputsRequest(
|
835
|
+
user_app_id=self.user_app_id, inputs=inputs, inputs_add_job_id=input_job_id
|
836
|
+
)
|
837
|
+
response = self._grpc_request(self.STUB.PostInputs, request)
|
838
|
+
if response.status.code != status_code_pb2.SUCCESS:
|
839
|
+
if show_log:
|
840
|
+
self.logger.warning(response)
|
841
|
+
else:
|
842
|
+
return input_job_id, response
|
843
|
+
elif show_log:
|
844
|
+
self.logger.info("\nInputs Uploaded\n%s", response.status)
|
845
|
+
|
846
|
+
return input_job_id, response
|
847
|
+
|
848
|
+
def patch_inputs(self, inputs: List[Input], action: str = 'merge') -> None:
|
849
|
+
"""Patch list of input objects to the app.
|
850
|
+
|
851
|
+
Args:
|
852
|
+
inputs (list): List of input objects to upload.
|
853
|
+
action (str): Action to perform on the input. Options: 'merge', 'overwrite', 'remove'.
|
854
|
+
|
855
|
+
Returns:
|
856
|
+
response: Response from the grpc request.
|
857
|
+
"""
|
858
|
+
if not isinstance(inputs, list):
|
859
|
+
raise UserError("inputs must be a list of Input objects")
|
860
|
+
request = service_pb2.PatchInputsRequest(
|
861
|
+
user_app_id=self.user_app_id, inputs=inputs, action=action
|
862
|
+
)
|
863
|
+
response = self._grpc_request(self.STUB.PatchInputs, request)
|
864
|
+
if response.status.code != status_code_pb2.SUCCESS:
|
865
|
+
try:
|
866
|
+
self.logger.warning(
|
867
|
+
f"Patch inputs failed, status: {response.annotations[0].status}"
|
868
|
+
)
|
869
|
+
except Exception:
|
870
|
+
self.logger.warning(f"Patch inputs failed, status: {response.status}")
|
871
|
+
else:
|
872
|
+
self.logger.info("\nPatch Inputs Successful\n%s", response.status)
|
873
|
+
|
874
|
+
def upload_annotations(
|
875
|
+
self, batch_annot: List[resources_pb2.Annotation], show_log: bool = True
|
876
|
+
) -> Union[List[resources_pb2.Annotation], List[None]]:
|
877
|
+
"""Upload image annotations to app.
|
878
|
+
|
879
|
+
Args:
|
880
|
+
batch_annot: annot batch protos
|
881
|
+
|
882
|
+
Returns:
|
883
|
+
retry_upload: failed annot upload
|
884
|
+
"""
|
885
|
+
retry_upload = [] # those that fail to upload are stored for retries
|
886
|
+
request = service_pb2.PostAnnotationsRequest(
|
887
|
+
user_app_id=self.user_app_id, annotations=batch_annot
|
888
|
+
)
|
889
|
+
response = self._grpc_request(self.STUB.PostAnnotations, request)
|
890
|
+
response_dict = MessageToDict(response)
|
891
|
+
if response.status.code != status_code_pb2.SUCCESS:
|
392
892
|
try:
|
393
|
-
|
394
|
-
|
395
|
-
|
396
|
-
|
397
|
-
|
398
|
-
|
399
|
-
|
893
|
+
for annot in response_dict["annotations"]:
|
894
|
+
if annot['status']['code'] != status_code_pb2.ANNOTATION_SUCCESS:
|
895
|
+
self.logger.warning(f"Post annotations failed, status: {annot['status']}")
|
896
|
+
except Exception:
|
897
|
+
self.logger.warning(f"Post annotations failed due to {response.status}")
|
898
|
+
finally:
|
899
|
+
retry_upload.extend(batch_annot)
|
900
|
+
elif show_log:
|
901
|
+
self.logger.info("\nAnnotations Uploaded\n%s", response.status)
|
902
|
+
|
903
|
+
return retry_upload
|
904
|
+
|
905
|
+
def patch_annotations(
|
906
|
+
self, batch_annot: List[resources_pb2.Annotation], action: str = 'merge'
|
907
|
+
) -> None:
|
908
|
+
"""Patch image annotations to app.
|
909
|
+
|
910
|
+
Args:
|
911
|
+
batch_annot: annot batch protos
|
912
|
+
action (str): Action to perform on the input. Options: 'merge', 'overwrite', 'remove'.
|
913
|
+
|
914
|
+
"""
|
915
|
+
if not isinstance(batch_annot, list):
|
916
|
+
raise UserError("batch_annot must be a list of Annotation objects")
|
917
|
+
request = service_pb2.PatchAnnotationsRequest(
|
918
|
+
user_app_id=self.user_app_id, annotations=batch_annot, action=action
|
919
|
+
)
|
920
|
+
response = self._grpc_request(self.STUB.PatchAnnotations, request)
|
921
|
+
response_dict = MessageToDict(response)
|
922
|
+
if response.status.code != status_code_pb2.SUCCESS:
|
923
|
+
try:
|
924
|
+
for annot in response_dict["annotations"]:
|
925
|
+
if annot['status']['code'] != status_code_pb2.ANNOTATION_SUCCESS:
|
926
|
+
self.logger.warning(f"Patch annotations failed, status: {annot['status']}")
|
927
|
+
except Exception:
|
928
|
+
self.logger.warning(f"Patch annotations failed due to {response.status}")
|
929
|
+
else:
|
930
|
+
self.logger.info("\nPatch Annotations Uploaded Successful\n%s", response.status)
|
931
|
+
|
+    def patch_concepts(
+        self,
+        concept_ids: List[str],
+        labels: List[str] = [],
+        values: List[float] = [],
+        action: str = 'overwrite',
+    ) -> None:
+        """Patch concepts to app.
+
+        Args:
+            concept_ids: A list of concept
+            labels: A list of label names
+            values: concept value
+            action (str): Action to perform on the input. Options: 'overwrite'.
+
+        """
+        if not labels:
+            labels = list(concept_ids)
+        if values:
+            concepts = [
+                resources_pb2.Concept(id=concept_id, name=label, value=value)
+                for concept_id, label, value in zip(concept_ids, labels, values)
+            ]
         else:
-   [old lines 401-410 removed; content not rendered in this diff view]
+            concepts = [
+                resources_pb2.Concept(id=concept_id, name=label, value=1.0)
+                for concept_id, label in zip(concept_ids, labels)
+            ]
+        request = service_pb2.PatchConceptsRequest(
+            user_app_id=self.user_app_id, concepts=concepts, action=action
+        )
+        response = self._grpc_request(self.STUB.PatchConcepts, request)
+        if response.status.code != status_code_pb2.SUCCESS:
+            self.logger.warning(f"Patch Concepts failed, status: {response.status.details}")
         else:
-   [old lines 412-440 removed; content not rendered in this diff view]
+            self.logger.info("\nPatch Concepts Successful\n%s", response.status)
+
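patch_concepts defaults the labels to the concept IDs and the values to 1.0 when they are omitted, and only 'overwrite' is documented as an action. For example, to rename two concepts in the app (placeholder IDs):

from clarifai.client.input import Inputs

input_obj = Inputs(user_id="user_id", app_id="demo_app")  # placeholder IDs
input_obj.patch_concepts(
    concept_ids=["cat", "dog"],
    labels=["Cat", "Dog"],
)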
+    def _upload_batch(self, inputs: List[Input]) -> List[Input]:
+        """Upload a batch of input objects to the app.
+
+        Args:
+            inputs (List[Input]): List of input objects to upload.
+
+        Returns:
+            input_job_id: job id for the upload request.
+        """
+        input_job_id, _ = self.upload_inputs(inputs, False)
+        self._wait_for_inputs(input_job_id)
+        failed_inputs = self._delete_failed_inputs(inputs)
+
+        return failed_inputs
+
+    def delete_inputs(self, inputs: List[Input]) -> None:
+        """Delete list of input objects from the app.
+
+        Args:
+            input_ids (Input): List of input objects to delete.
+
+        Example:
+            >>> from clarifai.client.user import User
+            >>> input_obj = User(user_id="user_id").app(app_id="app_id").inputs()
+            >>> input_obj.delete_inputs(list(input_obj.list_inputs()))
+        """
+        if not isinstance(inputs, list):
+            raise UserError("input_ids must be a list of input ids")
+        inputs_ids = [input.id for input in inputs]
+        request = service_pb2.DeleteInputsRequest(user_app_id=self.user_app_id, ids=inputs_ids)
+        response = self._grpc_request(self.STUB.DeleteInputs, request)
+        if response.status.code != status_code_pb2.SUCCESS:
+            raise Exception(response.status)
+        self.logger.info("\nInputs Deleted\n%s", response.status)
+
+    def delete_annotations(self, input_ids: List[str], annotation_ids: List[str] = []) -> None:
+        """Delete list of annotations of input objects from the app.
+
+        Args:
+            input_ids (Input): List of input objects for which annotations to delete.
+            annotation_ids (List[str]): List of annotation ids to delete.
+
+        Example:
+            >>> from clarifai.client.user import User
+            >>> input_obj = User(user_id="user_id").app(app_id="app_id").inputs()
+            >>> input_obj.delete_annotations(input_ids=['input_id_1', 'input_id_2'])
+
+        Note:
+            'annotation_ids' are optional but if the are provided, the number and order in
+            'annotation_ids' and 'input_ids' should match
+        """
+        if not isinstance(input_ids, list):
+            raise UserError("input_ids must be a list of input ids")
+        if annotation_ids and len(input_ids) != len(annotation_ids):
+            raise UserError("Number of provided annotation_ids and input_ids should match.")
+        request = service_pb2.DeleteAnnotationsRequest(
+            user_app_id=self.user_app_id, ids=annotation_ids, input_ids=input_ids
+        )
+        response = self._grpc_request(self.STUB.DeleteAnnotations, request)
+        if response.status.code != status_code_pb2.SUCCESS:
+            raise Exception(response.status)
+        self.logger.info("\nAnnotations Deleted\n%s", response.status)
+
+    def download_inputs(self, inputs: List[Input]) -> List[bytes]:
+        """Download list of input objects from the app.
+
+        Args:
+            input_ids (Input): List of input objects to download.
+
+        Example:
+            >>> from clarifai.client.user import User
+            >>> input_obj = User(user_id="user_id").app(app_id="app_id").inputs()
+            >>> input_obj.download_inputs(list(input_obj.list_inputs()))
+        """
+        if not isinstance(inputs, list):
+            raise UserError("input_ids must be a list of input ids")
+        final_inputs = []
+        # initiate session
+        session = requests.Session()
+        retries = Retry(total=3, backoff_factor=0.1, status_forcelist=[500, 502, 503, 504])
+        session.mount('https://', HTTPAdapter(max_retries=retries))
+        session.headers.update({'Authorization': self.metadata[0][1]})
+        # download inputs
+        data_types = ['image', 'video', 'audio', 'text']
+        for input in inputs:
+            for data_type in data_types:
+                url = getattr(input.data, data_type).url
+                if url:
+                    response = session.get(url, stream=True)
+                    if response.status_code == 200:
+                        final_inputs.append(response.content)
+
+        return final_inputs
+
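download_inputs returns raw bytes in the order the media URLs are encountered, so pairing the bytes back to their inputs is left to the caller. A small sketch that writes each downloaded image to disk, assuming each listed input yields exactly one downloadable URL (IDs and filenames are placeholders):

from clarifai.client.input import Inputs

input_obj = Inputs(user_id="user_id", app_id="demo_app")  # placeholder IDs
images = list(input_obj.list_inputs(input_type='image'))
blobs = input_obj.download_inputs(images)
for image, blob in zip(images, blobs):
    # Assumes a one-to-one match between listed inputs and downloaded blobs.
    with open(f"{image.id}.jpg", "wb") as f:
        f.write(blob)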
+    def list_inputs(
+        self,
+        dataset_id: str = None,
+        page_no: int = None,
+        per_page: int = None,
+        input_type: str = None,
+    ) -> Generator[Input, None, None]:
+        """Lists all the inputs for the app.
+
+        Args:
+            dataset_id (str): The dataset ID for the dataset to list inputs from.
+            page_no (int): The page number to list.
+            per_page (int): The number of items per page.
+            input_type (str): The type of input to list. Options: 'image', 'video', 'audio', 'text'.
+
+        Yields:
+            Input: Input objects for the app.
+
+        Example:
+            >>> from clarifai.client.user import User
+            >>> input_obj = User(user_id="user_id").app(app_id="app_id").inputs()
+            >>> all_inputs = list(input_obj.list_inputs(input_type='image'))
+
+        Note:
+            Defaults to 16 per page if page_no is specified and per_page is not specified.
+            If both page_no and per_page are None, then lists all the resources.
+        """
+        if input_type not in ['image', 'text', 'video', 'audio', None]:
+            raise UserError('Invalid input type, it should be image,text,audio or video')
+        if dataset_id:
+            request_data = dict(user_app_id=self.user_app_id, dataset_id=dataset_id)
+            all_inputs_info = self.list_pages_generator(
+                self.STUB.ListDatasetInputs,
+                service_pb2.ListDatasetInputsRequest,
+                request_data,
+                per_page=per_page,
+                page_no=page_no,
+            )
         else:
-   [old lines 442-525 removed; content not rendered in this diff view]
-      input_annot_proto = resources_pb2.Annotation(input_id=input_id, data=annot_data)
-
-    return input_annot_proto
-
-  @staticmethod
-  def get_mask_proto(input_id: str,
-                     label: str,
-                     polygons: List[List[float]],
-                     label_id: str = None,
-                     annot_id: str = None) -> Annotation:
-    """Create an annotation proto for each polygon box, label input pair.
-
-    Args:
-      input_id (str): The input ID for the annotation to create.
-      label (str): annotation label name
-      polygons (List): Polygon x,y points iterable
-      label_id (str): annotation label ID
-      annot_id (str): annotation ID
-
-    Returns:
-      An annotation object for the specified input ID.
-
-    Example:
-      >>> from clarifai.client.input import Inputs
-      >>> Inputs.get_mask_proto(input_id='demo', label='demo', polygons=[[[x,y],...,[x,y]],...])
-    """
-    if not isinstance(polygons, list):
-      raise UserError("polygons must be a list of points")
-    annot_data = resources_pb2.Data(regions=[
-        resources_pb2.Region(
-            region_info=resources_pb2.RegionInfo(polygon=resources_pb2.Polygon(
-                points=[
-                    resources_pb2.Point(
-                        row=_point[1],  # row is y point
-                        col=_point[0],  # col is x point
-                        visibility="VISIBLE") for _point in polygons
-                ])),
-            data=resources_pb2.Data(concepts=[
-                resources_pb2.Concept(id=label, name=label, value=1.)
-                if not label_id else resources_pb2.Concept(id=label_id, name=label, value=1.)
-            ]))
-    ])
-    if annot_id:
-      input_mask_proto = resources_pb2.Annotation(id=annot_id, input_id=input_id, data=annot_data)
-    else:
-      input_mask_proto = resources_pb2.Annotation(input_id=input_id, data=annot_data)
-
-    return input_mask_proto
-
-  def get_input(self, input_id: str) -> Input:
-    """Get Input object of input with input_id provided from the app.
-
-    Args:
-      input_id (str): The input ID for the annotation to get.
-
-    Returns:
-      Input: An Input object for the specified input ID.
-
-    Example:
-      >>> from clarifai.client.input import Inputs
-      >>> input_obj = Inputs(user_id = 'user_id', app_id = 'demo_app')
-      >>> input_obj.get_input(input_id='demo')
-    """
-    request = service_pb2.GetInputRequest(user_app_id=self.user_app_id, input_id=input_id)
-    response = self._grpc_request(self.STUB.GetInput, request)
-    return response.input
-
-  def upload_from_url(self,
-                      input_id: str,
-                      image_url: str = None,
-                      video_url: str = None,
-                      audio_url: str = None,
-                      text_url: str = None,
-                      dataset_id: str = None,
-                      **kwargs) -> str:
-    """Upload input from url.
-
-    Args:
-      input_id (str): The input ID for the input to create.
-      image_url (str): The url for the image.
-      video_url (str): The url for the video.
-      audio_url (str): The url for the audio.
-      text_url (str): The url for the text.
-      dataset_id (str): The dataset ID for the dataset to add the input to.
-
-    Returns:
-      input_job_id: job id for the upload request.
-
-    Example:
-      >>> from clarifai.client.input import Inputs
-      >>> input_obj = Inputs(user_id = 'user_id', app_id = 'demo_app')
-      >>> input_obj.upload_from_url(input_id='demo', image_url='https://samples.clarifai.com/metro-north.jpg')
-    """
-    input_pb = self.get_input_from_url(input_id, image_url, video_url, audio_url, text_url,
-                                       dataset_id, **kwargs)
-    return self.upload_inputs([input_pb])
-
-  def upload_from_file(self,
-                       input_id: str,
-                       image_file: str = None,
-                       video_file: str = None,
-                       audio_file: str = None,
-                       text_file: str = None,
-                       dataset_id: str = None,
-                       **kwargs) -> str:
-    """Upload input from file.
-
-    Args:
-      input_id (str): The input ID for the input to create.
-      image_file (str): The file for the image.
-      video_file (str): The file for the video.
-      audio_file (str): The file for the audio.
-      text_file (str): The file for the text.
-      dataset_id (str): The dataset ID for the dataset to add the input to.
-
-    Returns:
-      input_job_id: job id for the upload request.
-
-    Example:
-      >>> from clarifai.client.input import Inputs
-      >>> input_obj = Inputs(user_id = 'user_id', app_id = 'demo_app')
-      >>> input_obj.upload_from_file(input_id='demo', audio_file='demo.mp3')
-    """
-    input_pb = self.get_input_from_file(input_id, image_file, video_file, audio_file, text_file,
-                                        dataset_id, **kwargs)
-    return self.upload_inputs([input_pb])
-
-  def upload_from_bytes(self,
-                        input_id: str,
-                        image_bytes: bytes = None,
-                        video_bytes: bytes = None,
-                        audio_bytes: bytes = None,
-                        text_bytes: bytes = None,
-                        dataset_id: str = None,
-                        **kwargs) -> str:
-    """Upload input from bytes.
-
-    Args:
-      input_id (str): The input ID for the input to create.
-      image_bytes (str): The bytes for the image.
-      video_bytes (str): The bytes for the video.
-      audio_bytes (str): The bytes for the audio.
-      text_bytes (str): The bytes for the text.
-      dataset_id (str): The dataset ID for the dataset to add the input to.
-
-    Returns:
-      input_job_id: job id for the upload request.
-
-    Example:
-      >>> from clarifai.client.input import Inputs
-      >>> input_obj = Inputs(user_id = 'user_id', app_id = 'demo_app')
-      >>> image = open('demo.jpg', 'rb').read()
-      >>> input_obj.upload_from_bytes(input_id='demo', image_bytes=image)
-    """
-    input_pb = self.get_input_from_bytes(input_id, image_bytes, video_bytes, audio_bytes,
-                                         text_bytes, dataset_id, **kwargs)
-    return self.upload_inputs([input_pb])
-
-  def upload_text(self, input_id: str, raw_text: str, dataset_id: str = None,
-                  **kwargs) -> str:  #text specific
-    """Upload text from raw text.
-
-    Args:
-      input_id (str): The input ID for the input to create.
-      raw_text (str): The raw text.
-      dataset_id (str): The dataset ID for the dataset to add the input to.
-
-    Returns:
-      input_job_id (str): job id for the upload request.
-
-    Example:
-      >>> from clarifai.client.input import Inputs
-      >>> input_obj = Inputs(user_id = 'user_id', app_id = 'demo_app')
-      >>> input_obj.upload_text(input_id = 'demo', raw_text = 'This is a test')
-    """
-    input_pb = self._get_proto(
-        input_id=input_id,
-        dataset_id=dataset_id,
-        text_pb=resources_pb2.Text(raw=raw_text),
-        **kwargs)
-    return self.upload_inputs([input_pb])
-
-  def upload_inputs(self, inputs: List[Input], show_log: bool = True) -> str:
-    """Upload list of input objects to the app.
-
-    Args:
-      inputs (list): List of input objects to upload.
-      show_log (bool): Show upload status log.
-
-    Returns:
-      input_job_id: job id for the upload request.
-    """
-    if not isinstance(inputs, list):
-      raise UserError("inputs must be a list of Input objects")
-    if len(inputs) > MAX_UPLOAD_BATCH_SIZE:
-      raise UserError(
-          f"Number of inputs to upload exceeds the maximum batch size of {MAX_UPLOAD_BATCH_SIZE}. Please reduce batch size."
-      )
-    input_job_id = uuid.uuid4().hex  # generate a unique id for this job
-    request = service_pb2.PostInputsRequest(
-        user_app_id=self.user_app_id, inputs=inputs, inputs_add_job_id=input_job_id)
-    response = self._grpc_request(self.STUB.PostInputs, request)
-    if response.status.code != status_code_pb2.SUCCESS:
-      if show_log:
-        self.logger.warning(response)
-      else:
-        return input_job_id, response
-    else:
-      if show_log:
-        self.logger.info("\nInputs Uploaded\n%s", response.status)
-
-    return input_job_id, response
-
-  def patch_inputs(self, inputs: List[Input], action: str = 'merge') -> None:
-    """Patch list of input objects to the app.
-
-    Args:
-      inputs (list): List of input objects to upload.
-      action (str): Action to perform on the input. Options: 'merge', 'overwrite', 'remove'.
-
-    Returns:
-      response: Response from the grpc request.
-    """
-    if not isinstance(inputs, list):
-      raise UserError("inputs must be a list of Input objects")
-    request = service_pb2.PatchInputsRequest(
-        user_app_id=self.user_app_id, inputs=inputs, action=action)
-    response = self._grpc_request(self.STUB.PatchInputs, request)
-    if response.status.code != status_code_pb2.SUCCESS:
-      try:
-        self.logger.warning(f"Patch inputs failed, status: {response.annotations[0].status}")
-      except Exception:
-        self.logger.warning(f"Patch inputs failed, status: {response.status}")
-    else:
-      self.logger.info("\nPatch Inputs Successful\n%s", response.status)
-
-  def upload_annotations(self, batch_annot: List[resources_pb2.Annotation], show_log: bool = True
-                        ) -> Union[List[resources_pb2.Annotation], List[None]]:
-    """Upload image annotations to app.
-
-    Args:
-      batch_annot: annot batch protos
-
-    Returns:
-      retry_upload: failed annot upload
-    """
-    retry_upload = []  # those that fail to upload are stored for retries
-    request = service_pb2.PostAnnotationsRequest(
-        user_app_id=self.user_app_id, annotations=batch_annot)
-    response = self._grpc_request(self.STUB.PostAnnotations, request)
-    response_dict = MessageToDict(response)
-    if response.status.code != status_code_pb2.SUCCESS:
-      try:
-        for annot in response_dict["annotations"]:
-          if annot['status']['code'] != status_code_pb2.ANNOTATION_SUCCESS:
-            self.logger.warning(f"Post annotations failed, status: {annot['status']}")
-      except Exception:
-        self.logger.warning(f"Post annotations failed due to {response.status}")
-      finally:
-        retry_upload.extend(batch_annot)
-    else:
-      if show_log:
-        self.logger.info("\nAnnotations Uploaded\n%s", response.status)
-
-    return retry_upload
-
-  def patch_annotations(self, batch_annot: List[resources_pb2.Annotation],
-                        action: str = 'merge') -> None:
-    """Patch image annotations to app.
-
-    Args:
-      batch_annot: annot batch protos
-      action (str): Action to perform on the input. Options: 'merge', 'overwrite', 'remove'.
-
-    """
-    if not isinstance(batch_annot, list):
-      raise UserError("batch_annot must be a list of Annotation objects")
-    request = service_pb2.PatchAnnotationsRequest(
-        user_app_id=self.user_app_id, annotations=batch_annot, action=action)
-    response = self._grpc_request(self.STUB.PatchAnnotations, request)
-    response_dict = MessageToDict(response)
-    if response.status.code != status_code_pb2.SUCCESS:
-      try:
-        for annot in response_dict["annotations"]:
-          if annot['status']['code'] != status_code_pb2.ANNOTATION_SUCCESS:
-            self.logger.warning(f"Patch annotations failed, status: {annot['status']}")
-      except Exception:
-        self.logger.warning(f"Patch annotations failed due to {response.status}")
-    else:
-      self.logger.info("\nPatch Annotations Uploaded Successful\n%s", response.status)
-
-  def patch_concepts(self,
-                     concept_ids: List[str],
-                     labels: List[str] = [],
-                     values: List[float] = [],
-                     action: str = 'overwrite') -> None:
-    """Patch concepts to app.
-
-    Args:
-      concept_ids: A list of concept
-      labels: A list of label names
-      values: concept value
-      action (str): Action to perform on the input. Options: 'overwrite'.
-
-    """
-    if not labels:
-      labels = list(concept_ids)
-    if values:
-      concepts=[
-          resources_pb2.Concept(
-              id=concept_id, name=label, value=value)\
-          for concept_id, label, value in zip(concept_ids, labels, values)
-      ]
-    else:
-      concepts=[
-          resources_pb2.Concept(
-              id=concept_id, name=label, value=1.)\
-          for concept_id, label in zip(concept_ids, labels)
-      ]
-    request = service_pb2.PatchConceptsRequest(
-        user_app_id=self.user_app_id, concepts=concepts, action=action)
-    response = self._grpc_request(self.STUB.PatchConcepts, request)
-    if response.status.code != status_code_pb2.SUCCESS:
-      self.logger.warning(f"Patch Concepts failed, status: {response.status.details}")
-    else:
-      self.logger.info("\nPatch Concepts Successful\n%s", response.status)
-
-  def _upload_batch(self, inputs: List[Input]) -> List[Input]:
-    """Upload a batch of input objects to the app.
-
-    Args:
-      inputs (List[Input]): List of input objects to upload.
-
-    Returns:
-      input_job_id: job id for the upload request.
-    """
-    input_job_id, _ = self.upload_inputs(inputs, False)
-    self._wait_for_inputs(input_job_id)
-    failed_inputs = self._delete_failed_inputs(inputs)
-
-    return failed_inputs
-
-  def delete_inputs(self, inputs: List[Input]) -> None:
-    """Delete list of input objects from the app.
-
-    Args:
-      input_ids (Input): List of input objects to delete.
-
-    Example:
-      >>> from clarifai.client.user import User
-      >>> input_obj = User(user_id="user_id").app(app_id="app_id").inputs()
-      >>> input_obj.delete_inputs(list(input_obj.list_inputs()))
-    """
-    if not isinstance(inputs, list):
-      raise UserError("input_ids must be a list of input ids")
-    inputs_ids = [input.id for input in inputs]
-    request = service_pb2.DeleteInputsRequest(user_app_id=self.user_app_id, ids=inputs_ids)
-    response = self._grpc_request(self.STUB.DeleteInputs, request)
-    if response.status.code != status_code_pb2.SUCCESS:
-      raise Exception(response.status)
-    self.logger.info("\nInputs Deleted\n%s", response.status)
-
-  def delete_annotations(self, input_ids: List[str], annotation_ids: List[str] = []) -> None:
-    """Delete list of annotations of input objects from the app.
-
-    Args:
-      input_ids (Input): List of input objects for which annotations to delete.
-      annotation_ids (List[str]): List of annotation ids to delete.
-
-    Example:
-      >>> from clarifai.client.user import User
-      >>> input_obj = User(user_id="user_id").app(app_id="app_id").inputs()
-      >>> input_obj.delete_annotations(input_ids=['input_id_1', 'input_id_2'])
-
-    Note:
-      'annotation_ids' are optional but if the are provided, the number and order in
-      'annotation_ids' and 'input_ids' should match
-    """
-    if not isinstance(input_ids, list):
-      raise UserError("input_ids must be a list of input ids")
-    if annotation_ids and len(input_ids) != len(annotation_ids):
-      raise UserError("Number of provided annotation_ids and input_ids should match.")
-    request = service_pb2.DeleteAnnotationsRequest(
-        user_app_id=self.user_app_id, ids=annotation_ids, input_ids=input_ids)
-    response = self._grpc_request(self.STUB.DeleteAnnotations, request)
-    if response.status.code != status_code_pb2.SUCCESS:
-      raise Exception(response.status)
-    self.logger.info("\nAnnotations Deleted\n%s", response.status)
-
-  def download_inputs(self, inputs: List[Input]) -> List[bytes]:
-    """Download list of input objects from the app.
-
-    Args:
-      input_ids (Input): List of input objects to download.
-
-    Example:
-      >>> from clarifai.client.user import User
-      >>> input_obj = User(user_id="user_id").app(app_id="app_id").inputs()
-      >>> input_obj.download_inputs(list(input_obj.list_inputs()))
-    """
-    if not isinstance(inputs, list):
-      raise UserError("input_ids must be a list of input ids")
-    final_inputs = []
-    #initiate session
-    session = requests.Session()
-    retries = Retry(total=3, backoff_factor=0.1, status_forcelist=[500, 502, 503, 504])
-    session.mount('https://', HTTPAdapter(max_retries=retries))
-    session.headers.update({'Authorization': self.metadata[0][1]})
-    # download inputs
-    data_types = ['image', 'video', 'audio', 'text']
-    for input in inputs:
-      for data_type in data_types:
-        url = getattr(input.data, data_type).url
-        if url:
-          response = session.get(url, stream=True)
-          if response.status_code == 200:
-            final_inputs.append(response.content)
-
-    return final_inputs
-
-  def list_inputs(self,
-                  dataset_id: str = None,
-                  page_no: int = None,
-                  per_page: int = None,
-                  input_type: str = None) -> Generator[Input, None, None]:
-    """Lists all the inputs for the app.
-
-    Args:
-      dataset_id (str): The dataset ID for the dataset to list inputs from.
-      page_no (int): The page number to list.
-      per_page (int): The number of items per page.
-      input_type (str): The type of input to list. Options: 'image', 'video', 'audio', 'text'.
-
-    Yields:
-      Input: Input objects for the app.
-
-    Example:
-      >>> from clarifai.client.user import User
-      >>> input_obj = User(user_id="user_id").app(app_id="app_id").inputs()
-      >>> all_inputs = list(input_obj.list_inputs(input_type='image'))
-
-    Note:
-      Defaults to 16 per page if page_no is specified and per_page is not specified.
-      If both page_no and per_page are None, then lists all the resources.
-    """
-    if input_type not in ['image', 'text', 'video', 'audio', None]:
-      raise UserError('Invalid input type, it should be image,text,audio or video')
-    if dataset_id:
-      request_data = dict(user_app_id=self.user_app_id, dataset_id=dataset_id)
-      all_inputs_info = self.list_pages_generator(
-          self.STUB.ListDatasetInputs,
-          service_pb2.ListDatasetInputsRequest,
-          request_data,
-          per_page=per_page,
-          page_no=page_no)
-    else:
-      request_data = dict(user_app_id=self.user_app_id)
-      all_inputs_info = self.list_pages_generator(
-          self.STUB.ListInputs,
-          service_pb2.ListInputsRequest,
-          request_data,
-          per_page=per_page,
-          page_no=page_no)
-    for input_info in all_inputs_info:
-      input_info['id'] = input_info.pop('dataset_input_id') if dataset_id else input_info.pop(
-          'input_id')
-      if input_type:
-        if input_type not in input_info['data'].keys():
-          continue
-      yield resources_pb2.Input(**input_info)
-
-  def list_annotations(self,
-                       batch_input: List[Input] = None,
-                       page_no: int = None,
-                       per_page: int = None) -> Generator[Annotation, None, None]:
-    """Lists all the annotations for the app.
-
-    Args:
-      batch_input (List[Input]): The input objects to list annotations from.
-      page_no (int): The page number to list.
-      per_page (int): The number of items per page.
-
-    Yields:
-      Annotation: Annotation objects for the app.
-
-    Example:
-      >>> from clarifai.client.user import User
-      >>> input_obj = User(user_id="user_id").app(app_id="app_id").inputs()
-      >>> all_inputs = list(input_obj.list_inputs(input_type='image'))
-      >>> all_annotations = list(input_obj.list_annotations(batch_input=all_inputs))
-
-    Note:
-      If batch_input is not given, then lists all the annotations for the app.
-      Defaults to 16 per page if page_no is specified and per_page is not specified.
-      If both page_no and per_page are None, then lists all the resources.
-    """
-    request_data = dict(
-        user_app_id=self.user_app_id,
-        input_ids=[input.id for input in batch_input] if batch_input else None)
-    all_annotations_info = self.list_pages_generator(
-        self.STUB.ListAnnotations,
-        service_pb2.ListAnnotationsRequest,
-        request_data,
-        per_page=per_page,
-        page_no=page_no)
-    for annotations_info in all_annotations_info:
-      annotations_info['id'] = annotations_info.pop('annotation_id')
-      yield Annotation(**annotations_info)
-
-  def _bulk_upload(self, inputs: List[Input], batch_size: int = 128) -> None:
-    """Uploads process for large number of inputs.
-
-    Args:
-      inputs (List[Input]): input protos
-      batch_size (int): batch size for each request
-    """
-    num_workers: int = min(10, cpu_count())  # limit max workers to 10
-    batch_size = min(128, batch_size)  # limit max protos in a req
-    chunked_inputs = Chunker(inputs, batch_size).chunk()
-    with ThreadPoolExecutor(max_workers=num_workers) as executor:
-      with tqdm(total=len(chunked_inputs), desc='Uploading inputs') as progress:
-        # Submit all jobs to the executor and store the returned futures
-        futures = [
-            executor.submit(self._upload_batch, batch_input_ids)
-            for batch_input_ids in chunked_inputs
-        ]
+            request_data = dict(user_app_id=self.user_app_id)
+            all_inputs_info = self.list_pages_generator(
+                self.STUB.ListInputs,
+                service_pb2.ListInputsRequest,
+                request_data,
+                per_page=per_page,
+                page_no=page_no,
+            )
+        for input_info in all_inputs_info:
+            input_info['id'] = (
+                input_info.pop('dataset_input_id') if dataset_id else input_info.pop('input_id')
+            )
+            if input_type:
+                if input_type not in input_info['data'].keys():
+                    continue
+            yield resources_pb2.Input(**input_info)
+
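Because list_inputs is a generator, results stream page by page; passing page_no and per_page bounds the listing to a single page instead of walking the whole app. A short sketch (placeholder IDs):

from clarifai.client.input import Inputs

input_obj = Inputs(user_id="user_id", app_id="demo_app")  # placeholder IDs
# Fetch only the second page of 32 text inputs rather than materialising everything.
for text_input in input_obj.list_inputs(input_type='text', page_no=2, per_page=32):
    print(text_input.id)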
+    def list_annotations(
+        self, batch_input: List[Input] = None, page_no: int = None, per_page: int = None
+    ) -> Generator[Annotation, None, None]:
+        """Lists all the annotations for the app.
+
+        Args:
+            batch_input (List[Input]): The input objects to list annotations from.
+            page_no (int): The page number to list.
+            per_page (int): The number of items per page.
+
+        Yields:
+            Annotation: Annotation objects for the app.
+
+        Example:
+            >>> from clarifai.client.user import User
+            >>> input_obj = User(user_id="user_id").app(app_id="app_id").inputs()
+            >>> all_inputs = list(input_obj.list_inputs(input_type='image'))
+            >>> all_annotations = list(input_obj.list_annotations(batch_input=all_inputs))
+
+        Note:
+            If batch_input is not given, then lists all the annotations for the app.
+            Defaults to 16 per page if page_no is specified and per_page is not specified.
+            If both page_no and per_page are None, then lists all the resources.
+        """
+        request_data = dict(
+            user_app_id=self.user_app_id,
+            input_ids=[input.id for input in batch_input] if batch_input else None,
+        )
+        all_annotations_info = self.list_pages_generator(
+            self.STUB.ListAnnotations,
+            service_pb2.ListAnnotationsRequest,
+            request_data,
+            per_page=per_page,
+            page_no=page_no,
+        )
+        for annotations_info in all_annotations_info:
+            annotations_info['id'] = annotations_info.pop('annotation_id')
+            yield Annotation(**annotations_info)
+
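Annotations come back scoped to whatever is passed in batch_input, which keeps the listing small for large apps. A sketch that groups concept labels by input, assuming concept-style annotations that carry their labels under data.concepts (placeholder IDs):

from collections import defaultdict
from clarifai.client.input import Inputs

input_obj = Inputs(user_id="user_id", app_id="demo_app")  # placeholder IDs
images = list(input_obj.list_inputs(input_type='image', page_no=1, per_page=16))
concepts_by_input = defaultdict(list)
for annotation in input_obj.list_annotations(batch_input=images):
    for concept in annotation.data.concepts:
        concepts_by_input[annotation.input_id].append(concept.name)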
+    def _bulk_upload(self, inputs: List[Input], batch_size: int = 128) -> None:
+        """Uploads process for large number of inputs.
+
+        Args:
+            inputs (List[Input]): input protos
+            batch_size (int): batch size for each request
+        """
+        num_workers: int = min(10, cpu_count())  # limit max workers to 10
+        batch_size = min(128, batch_size)  # limit max protos in a req
+        chunked_inputs = Chunker(inputs, batch_size).chunk()
+        with ThreadPoolExecutor(max_workers=num_workers) as executor:
+            with tqdm(total=len(chunked_inputs), desc='Uploading inputs') as progress:
+                # Submit all jobs to the executor and store the returned futures
+                futures = [
+                    executor.submit(self._upload_batch, batch_input_ids)
+                    for batch_input_ids in chunked_inputs
+                ]
+
+                for job in as_completed(futures):
+                    retry_input_proto = job.result()
+                    self._retry_uploads(retry_input_proto)
+                    progress.update()
+
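_bulk_upload chunks the inputs, uploads the chunks in parallel, and feeds whatever each batch reports as failed back through _retry_uploads. A minimal standalone sketch of that chunk-and-retry pattern, independent of the client internals (all names here are illustrative):

from concurrent.futures import ThreadPoolExecutor, as_completed

def bulk_process(items, process_batch, batch_size=128, max_workers=10, max_retries=1):
    """Run process_batch over fixed-size chunks in parallel; re-run whatever it returns as failed."""
    chunks = [items[i:i + batch_size] for i in range(0, len(items), batch_size)]
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = [executor.submit(process_batch, chunk) for chunk in chunks]
        for future in as_completed(futures):
            failed = future.result()  # process_batch returns the items that failed
            for _ in range(max_retries):
                if not failed:
                    break
                failed = process_batch(failed)  # retry the leftovers serially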
+    def _wait_for_inputs(self, input_job_id: str) -> bool:
+        """Wait for inputs to be processed. Cancel Job if timeout > 30 minutes.
+
+        Args:
+            input_job_id (str): Upload Input Job ID

-   [old lines 1053-1140 removed; content not rendered in this diff view]
+        Returns:
+            True if inputs are processed, False otherwise
+        """
+        backoff_iterator = BackoffIterator(10)
+        max_retries = 10
+        start_time = time.time()
+        while True:
+            request = service_pb2.GetInputsAddJobRequest(
+                user_app_id=self.user_app_id, id=input_job_id
+            )
+            response = self._grpc_request(self.STUB.GetInputsAddJob, request)
+
+            if time.time() - start_time > 60 * 30 or max_retries == 0:  # 30 minutes timeout
+                self._grpc_request(
+                    self.STUB.CancelInputsAddJob,
+                    service_pb2.CancelInputsAddJobRequest(
+                        user_app_id=self.user_app_id, id=input_job_id
+                    ),
+                )  # Cancel Job
+                return False
+            if response.status.code != status_code_pb2.SUCCESS:
+                max_retries -= 1
+                self.logger.warning(f"Get input job failed, status: {response.status.details}\n")
+                continue
+            if (
+                response.inputs_add_job.progress.in_progress_count == 0
+                and response.inputs_add_job.progress.pending_count == 0
+            ):
+                return True
+            else:
+                time.sleep(next(backoff_iterator))
+
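The polling loop above combines a hard 30-minute deadline, a bounded retry budget for failed status calls, and an increasing sleep between checks. A generic version of that pattern with no Clarifai-specific calls (all names are illustrative):

import time

def wait_until(check, timeout_s=30 * 60, max_failures=10, base_delay=1.0, max_delay=30.0):
    """Poll check() until it returns True, the deadline passes, or too many calls raise."""
    deadline = time.time() + timeout_s
    delay, failures = base_delay, 0
    while time.time() < deadline and failures < max_failures:
        try:
            if check():
                return True
        except Exception:
            failures += 1  # tolerate a bounded number of transient errors
        time.sleep(delay)
        delay = min(delay * 2, max_delay)  # exponential backoff with a cap
    return False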
+    def _retry_uploads(self, failed_inputs: List[Input]) -> None:
+        """Retry failed uploads.
+
+        Args:
+            failed_inputs (List[Input]): failed input protos
+        """
+        for _retry in range(MAX_RETRIES):
+            if failed_inputs:
+                self.logger.info(f"Retrying upload for {len(failed_inputs)} Failed inputs..\n")
+                failed_inputs = self._upload_batch(failed_inputs)
+
+        self.logger.warning(f"Failed to upload {len(failed_inputs)} inputs..\n ")
+
+    def _delete_failed_inputs(self, inputs: List[Input]) -> List[Input]:
+        """Delete failed input ids from clarifai platform dataset.
+
+        Args:
+            inputs (List[Input]): batch input protos
+
+        Returns:
+            failed_inputs: failed inputs
+        """
+        input_ids = [input.id for input in inputs]
+        success_status = status_pb2.Status(code=status_code_pb2.INPUT_DOWNLOAD_SUCCESS)
+        request = service_pb2.ListInputsRequest(
+            ids=input_ids,
+            per_page=len(input_ids),
+            user_app_id=self.user_app_id,
+            status=success_status,
+        )
+        response = self._grpc_request(self.STUB.ListInputs, request)
+        response_dict = MessageToDict(response)
+        success_inputs = response_dict.get('inputs', [])
+
+        success_input_ids = [input.get('id') for input in success_inputs]
+        failed_inputs = [input for input in inputs if input.id not in success_input_ids]
+        # delete failed inputs
+        self._grpc_request(
+            self.STUB.DeleteInputs,
+            service_pb2.DeleteInputsRequest(
+                user_app_id=self.user_app_id, ids=[input.id for input in failed_inputs]
+            ),
+        )
+
+        return failed_inputs
+
+    def __getattr__(self, name):
+        return getattr(self.input_info, name)
+
+    def __str__(self):
+        init_params = [param for param in self.kwargs.keys()]
+        attribute_strings = [
+            f"{param}={getattr(self.input_info, param)}"
+            for param in init_params
+            if hasattr(self.input_info, param)
+        ]
+        return f"Input Details: \n{', '.join(attribute_strings)}\n"
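Taken together, a typical flow is: build input protos, upload them, attach annotations, and then check what actually landed in the app. A compact end-to-end sketch; the IDs are placeholders and get_input_from_url is assumed to behave as in the upload_from_url helper shown earlier in this diff:

from clarifai_grpc.grpc.api import resources_pb2
from clarifai.client.input import Inputs

input_obj = Inputs(user_id="user_id", app_id="demo_app")  # placeholder IDs

# 1. Build and upload an input from a URL.
proto = input_obj.get_input_from_url(
    input_id="demo", image_url="https://samples.clarifai.com/metro-north.jpg"
)
input_obj.upload_inputs([proto])

# 2. Attach a concept annotation and retry anything the API rejected.
annotation = resources_pb2.Annotation(
    input_id="demo",
    data=resources_pb2.Data(concepts=[resources_pb2.Concept(id="train", name="train", value=1.0)]),
)
failed = input_obj.upload_annotations([annotation])
if failed:
    input_obj.upload_annotations(failed)

# 3. Confirm what is now in the app.
print(len(list(input_obj.list_inputs(input_type='image'))))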