clarifai-11.4.1-py3-none-any.whl → clarifai-11.4.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clarifai/__init__.py +1 -1
- clarifai/client/dataset.py +6 -0
- clarifai/runners/models/model_builder.py +140 -36
- clarifai/runners/models/model_class.py +5 -22
- clarifai/runners/models/model_run_locally.py +0 -4
- clarifai/runners/models/visual_classifier_class.py +75 -0
- clarifai/runners/models/visual_detector_class.py +79 -0
- clarifai/runners/utils/code_script.py +41 -44
- clarifai/runners/utils/const.py +15 -0
- clarifai/runners/utils/data_utils.py +33 -5
- clarifai/runners/utils/loader.py +23 -2
- clarifai/runners/utils/method_signatures.py +4 -4
- clarifai/utils/logging.py +22 -5
- {clarifai-11.4.1.dist-info → clarifai-11.4.2.dist-info}/METADATA +1 -2
- {clarifai-11.4.1.dist-info → clarifai-11.4.2.dist-info}/RECORD +19 -17
- {clarifai-11.4.1.dist-info → clarifai-11.4.2.dist-info}/WHEEL +1 -1
- {clarifai-11.4.1.dist-info → clarifai-11.4.2.dist-info}/entry_points.txt +0 -0
- {clarifai-11.4.1.dist-info → clarifai-11.4.2.dist-info}/licenses/LICENSE +0 -0
- {clarifai-11.4.1.dist-info → clarifai-11.4.2.dist-info}/top_level.txt +0 -0
clarifai/__init__.py
CHANGED
@@ -1 +1 @@
-__version__ = "11.4.1"
+__version__ = "11.4.2"
clarifai/client/dataset.py
CHANGED
@@ -685,6 +685,12 @@ class Dataset(Lister, BaseClient):
         Note:
             This is a beta feature and is subject to change.
         """
+        try:
+            import rich  # noqa: F401
+        except ImportError:
+            raise UserError(
+                "Rich library is not installed. Please install it using pip install rich>=13.4.2"
+            )
         self.logger.info("Getting dataset upload status...")
         dataset_version_id = uuid.uuid4().hex
         _ = self.create_version(id=dataset_version_id, description="SDK Upload Status")
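
Note: the guard above makes rich an optional dependency for Dataset; it is only imported when the upload-status display actually runs. A minimal sketch of the same lazy-import pattern, with a local UserError standing in for clarifai.errors.UserError:

class UserError(Exception):
    """Stand-in for clarifai.errors.UserError."""

def require_rich() -> None:
    # Import lazily so `rich` is only needed when display features are used.
    try:
        import rich  # noqa: F401
    except ImportError:
        raise UserError(
            "Rich library is not installed. Please install it using pip install rich>=13.4.2"
        )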
clarifai/runners/models/model_builder.py
CHANGED
@@ -14,15 +14,17 @@ import yaml
 from clarifai_grpc.grpc.api import resources_pb2, service_pb2
 from clarifai_grpc.grpc.api.status import status_code_pb2
 from google.protobuf import json_format
-from rich import print
-from rich.markup import escape
 
 from clarifai.client.base import BaseClient
 from clarifai.runners.models.model_class import ModelClass
 from clarifai.runners.utils.const import (
+    AMD_PYTHON_BASE_IMAGE,
+    AMD_VLLM_BASE_IMAGE,
     AVAILABLE_PYTHON_IMAGES,
     AVAILABLE_TORCH_IMAGES,
     CONCEPTS_REQUIRED_MODEL_TYPE,
+    DEFAULT_AMD_GPU_VERSION,
+    DEFAULT_AMD_TORCH_VERSION,
     DEFAULT_DOWNLOAD_CHECKPOINT_WHEN,
     DEFAULT_PYTHON_VERSION,
     DEFAULT_RUNTIME_DOWNLOAD_PATH,
@@ -43,13 +45,6 @@ dependencies = [
 ]
 
 
-def _clear_line(n: int = 1) -> None:
-    LINE_UP = '\033[1A'  # Move cursor up one line
-    LINE_CLEAR = '\x1b[2K'  # Clear the entire line
-    for _ in range(n):
-        print(LINE_UP, end=LINE_CLEAR, flush=True)
-
-
 def is_related(object_class, main_class):
     # Check if the object_class is a subclass of main_class
     if issubclass(object_class, main_class):
@@ -361,13 +356,23 @@ class ModelBuilder:
         if self.config.get("checkpoints"):
             loader_type, _, hf_token, _, _, _ = self._validate_config_checkpoints()
 
-            if loader_type == "huggingface":
-                is_valid_token = HuggingFaceLoader.validate_hftoken(hf_token)
-                if not is_valid_token:
+            if loader_type == "huggingface":
+                is_valid_token = hf_token and HuggingFaceLoader.validate_hftoken(hf_token)
+                if not is_valid_token and hf_token:
+                    logger.info(
+                        "Continuing without Hugging Face token for validating config in model builder."
+                    )
+
+                has_repo_access = HuggingFaceLoader.validate_hf_repo_access(
+                    repo_id=self.config.get("checkpoints", {}).get("repo_id"),
+                    token=hf_token if is_valid_token else None,
+                )
+
+                if not has_repo_access:
                     logger.error(
-                        "Invalid Hugging Face
+                        f"Invalid Hugging Face repo access for repo {self.config.get('checkpoints').get('repo_id')}. Please check your repo and try again."
                     )
-
+                    sys.exit("Token does not have access to HuggingFace repo , exiting.")
 
         num_threads = self.config.get("num_threads")
         if num_threads or num_threads == 0:
@@ -532,6 +537,30 @@ class ModelBuilder:
             dependencies_version[dependency] = version if version else None
         return dependencies_version
 
+    def _is_amd(self):
+        """
+        Check if the model is AMD or not.
+        """
+        is_amd_gpu = False
+        is_nvidia_gpu = False
+        if "inference_compute_info" in self.config:
+            inference_compute_info = self.config.get('inference_compute_info')
+            if 'accelerator_type' in inference_compute_info:
+                for accelerator in inference_compute_info['accelerator_type']:
+                    if 'amd' in accelerator.lower():
+                        is_amd_gpu = True
+                    elif 'nvidia' in accelerator.lower():
+                        is_nvidia_gpu = True
+        if is_amd_gpu and is_nvidia_gpu:
+            raise Exception(
+                "Both AMD and NVIDIA GPUs are specified in the config file, please use only one type of GPU."
+            )
+        if is_amd_gpu:
+            logger.info("Using AMD base image to build the Docker image and upload the model")
+        elif is_nvidia_gpu:
+            logger.info("Using NVIDIA base image to build the Docker image and upload the model")
+        return is_amd_gpu
+
     def create_dockerfile(self):
         dockerfile_template = os.path.join(
             os.path.dirname(os.path.dirname(__file__)),
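
Note: _is_amd() decides the base image purely from the accelerator strings in inference_compute_info. A hedged sketch of the same check against an in-memory config (the accelerator names here are illustrative, not an official list):

# Illustrative config; real values come from the model's config.yaml.
config = {
    "inference_compute_info": {
        "accelerator_type": ["AMD-MI300X"],  # could instead be e.g. ["NVIDIA-A10G"]
    }
}

accelerators = config.get("inference_compute_info", {}).get("accelerator_type", [])
is_amd = any("amd" in a.lower() for a in accelerators)
is_nvidia = any("nvidia" in a.lower() for a in accelerators)
if is_amd and is_nvidia:
    raise Exception("Both AMD and NVIDIA GPUs are specified in the config file")
print("amd" if is_amd else "nvidia" if is_nvidia else "cpu/other")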
@@ -562,30 +591,85 @@ class ModelBuilder:
             )
             python_version = DEFAULT_PYTHON_VERSION
 
-        # This is always the final image used for runtime.
-        final_image = PYTHON_BASE_IMAGE.format(python_version=python_version)
-        downloader_image = PYTHON_BASE_IMAGE.format(python_version=python_version)
-
         # Parse the requirements.txt file to determine the base image
         dependencies = self._parse_requirements()
-
-
-
-
-
-
-
-
-
-
-
-
+
+        is_amd_gpu = self._is_amd()
+        if is_amd_gpu:
+            final_image = AMD_PYTHON_BASE_IMAGE.format(python_version=python_version)
+            downloader_image = AMD_PYTHON_BASE_IMAGE.format(python_version=python_version)
+            if 'vllm' in dependencies:
+                if python_version != DEFAULT_PYTHON_VERSION:
+                    raise Exception(
+                        f"vLLM is not supported with Python version {python_version}, please use Python version {DEFAULT_PYTHON_VERSION} in your config.yaml"
+                    )
+                torch_version = dependencies.get('torch', None)
+                if 'torch' in dependencies:
+                    if python_version != DEFAULT_PYTHON_VERSION:
+                        raise Exception(
+                            f"torch is not supported with Python version {python_version}, please use Python version {DEFAULT_PYTHON_VERSION} in your config.yaml"
+                        )
+                    if not torch_version:
+                        logger.info(
+                            f"torch version not found in requirements.txt, using the default version {DEFAULT_AMD_TORCH_VERSION}"
+                        )
+                        torch_version = DEFAULT_AMD_TORCH_VERSION
+                    if torch_version not in [DEFAULT_AMD_TORCH_VERSION]:
+                        raise Exception(
+                            f"torch version {torch_version} not supported, please use one of the following versions: {DEFAULT_AMD_TORCH_VERSION} in your requirements.txt"
+                        )
+                python_version = DEFAULT_PYTHON_VERSION
+                gpu_version = DEFAULT_AMD_GPU_VERSION
+                final_image = AMD_VLLM_BASE_IMAGE.format(
+                    torch_version=torch_version,
+                    python_version=python_version,
+                    gpu_version=gpu_version,
+                )
+                logger.info("Using vLLM base image to build the Docker image")
+            elif 'torch' in dependencies:
+                torch_version = dependencies['torch']
+                if python_version != DEFAULT_PYTHON_VERSION:
+                    raise Exception(
+                        f"torch is not supported with Python version {python_version}, please use Python version {DEFAULT_PYTHON_VERSION} in your config.yaml"
                     )
+                if not torch_version:
                     logger.info(
-                        f"
+                        f"torch version not found in requirements.txt, using the default version {DEFAULT_AMD_TORCH_VERSION}"
                     )
-
-
+                    torch_version = DEFAULT_AMD_TORCH_VERSION
+                if torch_version not in [DEFAULT_AMD_TORCH_VERSION]:
+                    raise Exception(
+                        f"torch version {torch_version} not supported, please use one of the following versions: {DEFAULT_AMD_TORCH_VERSION} in your requirements.txt"
+                    )
+                python_version = DEFAULT_PYTHON_VERSION
+                gpu_version = DEFAULT_AMD_GPU_VERSION
+                final_image = TORCH_BASE_IMAGE.format(
+                    torch_version=torch_version,
+                    python_version=python_version,
+                    gpu_version=gpu_version,
+                )
+                logger.info(
+                    f"Using Torch version {torch_version} base image to build the Docker image"
+                )
+        else:
+            final_image = PYTHON_BASE_IMAGE.format(python_version=python_version)
+            downloader_image = PYTHON_BASE_IMAGE.format(python_version=python_version)
+            if 'torch' in dependencies and dependencies['torch']:
+                torch_version = dependencies['torch']
+                # Sort in reverse so that newer cuda versions come first and are preferred.
+                for image in sorted(AVAILABLE_TORCH_IMAGES, reverse=True):
+                    if torch_version in image and f'py{python_version}' in image:
+                        # like cu124, rocm6.3, etc.
+                        gpu_version = image.split('-')[-1]
+                        final_image = TORCH_BASE_IMAGE.format(
+                            torch_version=torch_version,
+                            python_version=python_version,
+                            gpu_version=gpu_version,
+                        )
+                        logger.info(
+                            f"Using Torch version {torch_version} base image to build the Docker image"
+                        )
+                        break
         if 'clarifai' not in dependencies:
             raise Exception(
                 f"clarifai not found in requirements.txt, please add clarifai to the requirements.txt file with a fixed version. Current version is clarifai=={CLIENT_VERSION}"
@@ -835,7 +919,6 @@ class ModelBuilder:
         percent_completed = response.status.percent_completed
         details = response.status.details
 
-        _clear_line()
         print(
             f"Status: {response.status.description}, Progress: {percent_completed}% - {details} ",
             f"request_id: {response.status.req_id}",
@@ -849,7 +932,23 @@ class ModelBuilder:
         logger.info(f"Created Model Version ID: {self.model_version_id}")
         logger.info(f"Full url to that version is: {self.model_url}")
         try:
-            self.monitor_model_build()
+            is_uploaded = self.monitor_model_build()
+            if is_uploaded:
+                from clarifai.runners.utils import code_script
+
+                method_signatures = self.get_method_signatures()
+                snippet = code_script.generate_client_script(
+                    method_signatures,
+                    user_id=self.client.user_app_id.user_id,
+                    app_id=self.client.user_app_id.app_id,
+                    model_id=self.model_proto.id,
+                )
+                logger.info("""\n
+XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+# Here is a code snippet to call this model:
+XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+""")
+                logger.info(snippet)
         finally:
             if os.path.exists(self.tar_file):
                 logger.debug(f"Cleaning up upload file: {self.tar_file}")
@@ -933,7 +1032,12 @@ class ModelBuilder:
             for log_entry in logs.log_entries:
                 if log_entry.url not in seen_logs:
                     seen_logs.add(log_entry.url)
-
+                    log_entry_msg = re.sub(
+                        r"(\\*)(\[[a-z#/@][^[]*?])",
+                        lambda m: f"{m.group(1)}{m.group(1)}\\{m.group(2)}",
+                        log_entry.message.strip(),
+                    )
+                    logger.info(log_entry_msg)
             if status_code == status_code_pb2.MODEL_BUILDING:
                 print(
                     f"Model is building... (elapsed {time.time() - st:.1f}s)", end='\r', flush=True
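
Note: the re.sub above escapes rich markup in build-log lines before they reach logger.info, mirroring what rich.markup.escape does, so a log message containing [bold] is printed literally instead of being interpreted as a style tag. A standalone check of the same expression:

import re

def escape_markup(text: str) -> str:
    # Double any leading backslashes, then backslash-escape the opening of a
    # rich markup tag such as [bold] or [/]; [0.1] is left alone because the
    # first character after '[' must be a-z, '#', '/', or '@'.
    return re.sub(
        r"(\\*)(\[[a-z#/@][^[]*?])",
        lambda m: f"{m.group(1)}{m.group(1)}\\{m.group(2)}",
        text,
    )

print(escape_markup("loss=[0.1] [bold]done[/bold]"))
# -> loss=[0.1] \[bold]done\[/bold]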
clarifai/runners/models/model_class.py
CHANGED
@@ -9,7 +9,6 @@ from typing import Any, Dict, Iterator, List
 
 from clarifai_grpc.grpc.api import resources_pb2, service_pb2
 from clarifai_grpc.grpc.api.status import status_code_pb2, status_pb2
-from google.protobuf import json_format
 
 from clarifai.runners.utils import data_types
 from clarifai.runners.utils.data_utils import DataConverter
@@ -100,7 +99,6 @@ class ModelClass(ABC):
         try:
             # TODO add method name field to proto
             method_name = 'predict'
-            inference_params = get_inference_params(request)
             if len(request.inputs) > 0 and '_method_name' in request.inputs[0].data.metadata:
                 method_name = request.inputs[0].data.metadata['_method_name']
             if (
@@ -124,7 +122,7 @@ class ModelClass(ABC):
                 input.data.CopyFrom(new_data)
             # convert inputs to python types
             inputs = self._convert_input_protos_to_python(
-                request.inputs,
+                request.inputs, signature.input_fields, python_param_types
             )
             if len(inputs) == 1:
                 inputs = inputs[0]
@@ -163,7 +161,6 @@ class ModelClass(ABC):
     ) -> Iterator[service_pb2.MultiOutputResponse]:
         try:
             method_name = 'generate'
-            inference_params = get_inference_params(request)
             if len(request.inputs) > 0 and '_method_name' in request.inputs[0].data.metadata:
                 method_name = request.inputs[0].data.metadata['_method_name']
             method = getattr(self, method_name)
@@ -180,7 +177,7 @@ class ModelClass(ABC):
                 )
                 input.data.CopyFrom(new_data)
             inputs = self._convert_input_protos_to_python(
-                request.inputs,
+                request.inputs, signature.input_fields, python_param_types
             )
             if len(inputs) == 1:
                 inputs = inputs[0]
@@ -226,7 +223,6 @@ class ModelClass(ABC):
             assert len(request.inputs) == 1, "Streaming requires exactly one input"
 
             method_name = 'stream'
-            inference_params = get_inference_params(request)
             if len(request.inputs) > 0 and '_method_name' in request.inputs[0].data.metadata:
                 method_name = request.inputs[0].data.metadata['_method_name']
             method = getattr(self, method_name)
@@ -251,7 +247,7 @@ class ModelClass(ABC):
                 input.data.CopyFrom(new_data)
             # convert all inputs for the first request, including the first stream value
             inputs = self._convert_input_protos_to_python(
-                request.inputs,
+                request.inputs, signature.input_fields, python_param_types
             )
             kwargs = inputs[0]
 
@@ -264,7 +260,7 @@ class ModelClass(ABC):
             # subsequent streaming items contain only the streaming input
             for request in request_iterator:
                 item = self._convert_input_protos_to_python(
-                    request.inputs,
+                    request.inputs, [stream_sig], python_param_types
                 )
                 item = item[0][stream_argname]
                 yield item
@@ -297,13 +293,12 @@ class ModelClass(ABC):
     def _convert_input_protos_to_python(
         self,
         inputs: List[resources_pb2.Input],
-        inference_params: dict,
         variables_signature: List[resources_pb2.ModelTypeField],
         python_param_types,
     ) -> List[Dict[str, Any]]:
         result = []
         for input in inputs:
-            kwargs = deserialize(input.data, variables_signature
+            kwargs = deserialize(input.data, variables_signature)
             # dynamic cast to annotated types
             for k, v in kwargs.items():
                 if k not in python_param_types:
@@ -374,18 +369,6 @@ class ModelClass(ABC):
         return method_info
 
 
-# Helper function to get the inference params
-def get_inference_params(request) -> dict:
-    """Get the inference params from the request."""
-    inference_params = {}
-    if request.model.model_version.id != "":
-        output_info = request.model.model_version.output_info
-        output_info = json_format.MessageToDict(output_info, preserving_proto_field_name=True)
-        if "params" in output_info:
-            inference_params = output_info["params"]
-    return inference_params
-
-
 class _MethodInfo:
     def __init__(self, method):
         self.name = method.__name__
clarifai/runners/models/model_run_locally.py
CHANGED
@@ -442,10 +442,6 @@ def main(
     manager = ModelRunLocally(model_path)
     # get whatever stage is in config.yaml to force download now
     # also always write to where upload/build wants to, not the /tmp folder that runtime stage uses
-    _, _, _, when, _, _ = manager.builder._validate_config_checkpoints()
-    manager.builder.download_checkpoints(
-        stage=when, checkpoint_path_override=manager.builder.checkpoint_path
-    )
     if inside_container:
         if not manager.is_docker_installed():
             sys.exit(1)
clarifai/runners/models/visual_classifier_class.py
ADDED
@@ -0,0 +1,75 @@
+import os
+import tempfile
+from io import BytesIO
+from typing import Dict, Iterator, List
+
+import cv2
+import torch
+from PIL import Image as PILImage
+
+from clarifai.runners.models.model_class import ModelClass
+from clarifai.runners.utils.data_types import Concept, Frame, Image
+from clarifai.utils.logging import logger
+
+
+class VisualClassifierClass(ModelClass):
+    """Base class for visual classification models supporting image and video processing."""
+
+    @staticmethod
+    def preprocess_image(image_bytes: bytes) -> PILImage:
+        """Convert image bytes to PIL Image."""
+        return PILImage.open(BytesIO(image_bytes)).convert("RGB")
+
+    @staticmethod
+    def video_to_frames(video_bytes: bytes) -> Iterator[Frame]:
+        """Convert video bytes to frames.
+
+        Args:
+            video_bytes: Raw video data in bytes
+
+        Yields:
+            Frame with JPEG encoded frame data as bytes and timestamp in milliseconds
+        """
+        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as temp_video_file:
+            temp_video_file.write(video_bytes)
+            temp_video_path = temp_video_file.name
+            logger.debug(f"temp_video_path: {temp_video_path}")
+
+        video = cv2.VideoCapture(temp_video_path)
+        logger.debug(f"video opened: {video.isOpened()}")
+
+        while video.isOpened():
+            ret, frame = video.read()
+            if not ret:
+                break
+            # Get frame timestamp in milliseconds
+            timestamp_ms = video.get(cv2.CAP_PROP_POS_MSEC)
+            frame_bytes = cv2.imencode('.jpg', frame)[1].tobytes()
+            yield Frame(image=Image(bytes=frame_bytes), time=timestamp_ms)
+
+        video.release()
+        os.unlink(temp_video_path)
+
+    @staticmethod
+    def process_concepts(
+        logits: torch.Tensor, threshold: float, model_labels: Dict[int, str]
+    ) -> List[List[Concept]]:
+        """Convert model logits into a structured format of concepts.
+
+        Args:
+            logits: Model output logits as a tensor (batch_size x num_classes)
+            model_labels: Dictionary mapping label indices to label names
+
+        Returns:
+            List of lists containing Concept objects for each input in the batch
+        """
+        outputs = []
+        for logit in logits:
+            probs = torch.softmax(logit, dim=-1)
+            sorted_indices = torch.argsort(probs, dim=-1, descending=True)
+            output_concepts = []
+            for idx in sorted_indices:
+                concept = Concept(name=model_labels[idx.item()], value=probs[idx].item())
+                output_concepts.append(concept)
+            outputs.append(output_concepts)
+        return outputs
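
Note: a sketch of how a concrete model might compose these helpers. MyClassifier, self.model, self.transform, and self.labels are hypothetical, and the @ModelClass.method decorator is assumed to be the SDK's method-registration mechanism:

from typing import List

from clarifai.runners.models.model_class import ModelClass
from clarifai.runners.models.visual_classifier_class import VisualClassifierClass
from clarifai.runners.utils.data_types import Concept, Image


class MyClassifier(VisualClassifierClass):
    @ModelClass.method
    def predict(self, image: Image) -> List[Concept]:
        pil_image = self.preprocess_image(image.bytes)   # bytes -> RGB PIL image
        logits = self.model(self.transform(pil_image))   # hypothetical torch model + transform
        # Rank every label by softmax probability for the single input.
        return self.process_concepts(logits, threshold=0.0, model_labels=self.labels)[0]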
clarifai/runners/models/visual_detector_class.py
ADDED
@@ -0,0 +1,79 @@
+import os
+import tempfile
+from io import BytesIO
+from typing import Dict, Iterator, List
+
+import cv2
+import torch
+from PIL import Image as PILImage
+
+from clarifai.runners.models.model_class import ModelClass
+from clarifai.runners.utils.data_types import Concept, Frame, Image, Region
+from clarifai.utils.logging import logger
+
+
+class VisualDetectorClass(ModelClass):
+    """Base class for visual detection models supporting image and video processing."""
+
+    @staticmethod
+    def preprocess_image(image_bytes: bytes) -> PILImage:
+        """Convert image bytes to PIL Image."""
+        return PILImage.open(BytesIO(image_bytes)).convert("RGB")
+
+    @staticmethod
+    def video_to_frames(video_bytes: bytes) -> Iterator[Frame]:
+        """Convert video bytes to frames.
+
+        Args:
+            video_bytes: Raw video data in bytes
+
+        Yields:
+            Frame with JPEG encoded frame data as bytes and timestamp in milliseconds
+        """
+        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as temp_video_file:
+            temp_video_file.write(video_bytes)
+            temp_video_path = temp_video_file.name
+            logger.debug(f"temp_video_path: {temp_video_path}")
+
+        video = cv2.VideoCapture(temp_video_path)
+        logger.debug(f"video opened: {video.isOpened()}")
+
+        while video.isOpened():
+            ret, frame = video.read()
+            if not ret:
+                break
+            # Get frame timestamp in milliseconds
+            timestamp_ms = video.get(cv2.CAP_PROP_POS_MSEC)
+            frame_bytes = cv2.imencode('.jpg', frame)[1].tobytes()
+            yield Frame(image=Image(bytes=frame_bytes), time=timestamp_ms)
+
+        video.release()
+        os.unlink(temp_video_path)
+
+    @staticmethod
+    def process_detections(
+        results: List[Dict[str, torch.Tensor]], threshold: float, model_labels: Dict[int, str]
+    ) -> List[List[Region]]:
+        """Convert model outputs into a structured format of detections.
+
+        Args:
+            results: Raw detection results from model
+            threshold: Confidence threshold for detections
+            model_labels: Dictionary mapping label indices to names
+
+        Returns:
+            List of lists containing Region objects for each detection
+        """
+        outputs = []
+        for result in results:
+            detections = []
+            for score, label_idx, box in zip(result["scores"], result["labels"], result["boxes"]):
+                if score > threshold:
+                    label = model_labels[label_idx.item()]
+                    detections.append(
+                        Region(
+                            box=box.tolist(), concepts=[Concept(name=label, value=score.item())]
+                        )
+                    )
+            outputs.append(detections)
+        return outputs
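
Note: process_detections expects torchvision-style result dicts with scores, labels, and boxes tensors. A minimal, self-contained exercise of the thresholding logic with fake outputs (the labels are illustrative):

import torch

from clarifai.runners.models.visual_detector_class import VisualDetectorClass

results = [{
    "scores": torch.tensor([0.92, 0.40]),
    "labels": torch.tensor([1, 2]),
    "boxes": torch.tensor([[0.1, 0.2, 0.5, 0.6], [0.0, 0.0, 1.0, 1.0]]),
}]
regions = VisualDetectorClass.process_detections(
    results, threshold=0.5, model_labels={1: "dog", 2: "cat"}
)
# Only the 0.92 "dog" detection survives the 0.5 threshold.
print(regions)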
clarifai/runners/utils/code_script.py
CHANGED
@@ -3,7 +3,7 @@ from typing import List
 
 from clarifai_grpc.grpc.api import resources_pb2
 
-from clarifai.runners.utils import
+from clarifai.runners.utils import data_utils
 
 
 def generate_client_script(
@@ -35,19 +35,12 @@ from clarifai.runners.utils import data_types
     model_section = """
 model = Model.from_current_context()"""
     else:
-        model_section = """
-
+        model_section = f"""
+model = Model("https://clarifai.com/{user_id}/{app_id}/models/{model_id}",
     deployment_id = {deployment_id}, # Only needed for dedicated deployed models
     {base_url_str}
 )
 """
-        model_section = _CLIENT_TEMPLATE.format(
-            user_id=user_id,
-            app_id=app_id,
-            model_id=model_id,
-            deployment_id=deployment_id,
-            base_url_str=base_url_str,
-        )
 
     # Generate client template
     client_template = _CLIENT_TEMPLATE.format(
@@ -58,28 +51,24 @@ model = Model.from_current_context()"""
     method_signatures_str = []
     for method_signature in method_signatures:
         method_name = method_signature.name
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        client_script_str
-
-
-        elif method_signature.method_type == resources_pb2.RunnerMethodType.UNARY_STREAMING:
-            client_script_str += "\nfor res in response:\n    print(res)"
-        client_script_str += "\n"
-        method_signatures_str.append(client_script_str)
+        client_script_str = f'response = model.{method_name}('
+        annotations = _get_annotations_source(method_signature)
+        for param_name, (param_type, default_value) in annotations.items():
+            print(
+                f"param_name: {param_name}, param_type: {param_type}, default_value: {default_value}"
+            )
+            if param_name == "return":
+                continue
+            if default_value is None:
+                default_value = _set_default_value(param_type)
+            client_script_str += f"{param_name}={default_value}, "
+        client_script_str = client_script_str.rstrip(", ") + ")"
+        if method_signature.method_type == resources_pb2.RunnerMethodType.UNARY_UNARY:
+            client_script_str += "\nprint(response)"
+        elif method_signature.method_type == resources_pb2.RunnerMethodType.UNARY_STREAMING:
+            client_script_str += "\nfor res in response:\n    print(res)"
+        client_script_str += "\n"
+        method_signatures_str.append(client_script_str)
 
     method_signatures_str = "\n".join(method_signatures_str)
     # Combine all parts
@@ -107,9 +96,8 @@ def _get_annotations_source(method_signature: resources_pb2.MethodSignature) ->
         if input_field.iterator:
             param_type = f"Iterator[{param_type}]"
         default_value = None
-        if input_field
+        if data_utils.Param.get_default(input_field):
            default_value = _parse_default_value(input_field)
-
        annotations[param_name] = (param_type, default_value)
    if not method_signature.output_fields:
        raise ValueError("MethodSignature must have at least one output field")
@@ -177,23 +165,21 @@ def _map_default_value(field_type):
    elif field_type == "bool":
        default_value = False
    elif field_type == "data_types.Image":
-        default_value = data_types.Image.from_url("https://samples.clarifai.com/metro-north.jpg")
+        default_value = 'data_types.Image.from_url("https://samples.clarifai.com/metro-north.jpg")'
    elif field_type == "data_types.Text":
-        default_value = data_types.Text("What
+        default_value = 'data_types.Text("What is the future of AI?")'
    elif field_type == "data_types.Audio":
-        default_value = data_types.Audio.from_url("https://samples.clarifai.com/audio.mp3")
+        default_value = 'data_types.Audio.from_url("https://samples.clarifai.com/audio.mp3")'
    elif field_type == "data_types.Video":
-        default_value = data_types.Video.from_url("https://samples.clarifai.com/video.mp4")
+        default_value = 'data_types.Video.from_url("https://samples.clarifai.com/video.mp4")'
    elif field_type == "data_types.Concept":
-        default_value = data_types.Concept(id="concept_id", name="dog", value=0.95)
+        default_value = 'data_types.Concept(id="concept_id", name="dog", value=0.95)'
    elif field_type == "data_types.Region":
-        default_value = data_types.Region(
-            box=[0.1, 0.1, 0.5, 0.5],
-        )
+        default_value = 'data_types.Region(box=[0.1, 0.1, 0.5, 0.5],)'
    elif field_type == "data_types.Frame":
-        default_value = data_types.Frame.from_url("https://samples.clarifai.com/video.mp4", 0)
+        default_value = 'data_types.Frame.from_url("https://samples.clarifai.com/video.mp4", 0)'
    elif field_type == "data_types.NDArray":
-        default_value = data_types.NDArray([1, 2, 3])
+        default_value = 'data_types.NDArray([1, 2, 3])'
    else:
        default_value = None
    return default_value
@@ -203,6 +189,12 @@ def _set_default_value(field_type):
    """
    Set the default value of a field if it is not set.
    """
+    is_iterator = False
+    print(f"before field_type: {field_type}")
+    if field_type.startswith("Iterator["):
+        is_iterator = True
+        field_type = field_type[9:-1]
+    print(f"after field_type: {field_type}")
    default_value = None
    default_value = _map_default_value(field_type)
    if field_type.startswith("List["):
@@ -219,6 +211,11 @@ def _set_default_value(field_type):
        element_type_defaults = [_map_default_value(et) for et in element_types]
        default_value = f"{{{', '.join([str(et) for et in element_type_defaults])}}}"
 
+    if field_type == 'str':
+        default_value = repr(default_value)
+    if is_iterator:
+        default_value = f'iter([{default_value}])'
+    print(f"after default_value: {default_value}")
    return default_value
 
 
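
Note: the _map_default_value change above swaps live data_types objects for their source-text representation, because the defaults are spliced into a generated client script as code, not executed in the generator. A small sketch of why the quoting matters:

# The generated snippet is source text, so each default must be source text too.
default_value = 'data_types.Image.from_url("https://samples.clarifai.com/metro-north.jpg")'
client_script_str = f"response = model.predict(image={default_value})"
print(client_script_str)
# response = model.predict(image=data_types.Image.from_url("https://samples.clarifai.com/metro-north.jpg"))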
clarifai/runners/utils/const.py
CHANGED
@@ -4,14 +4,28 @@ registry = os.environ.get('CLARIFAI_BASE_IMAGE_REGISTRY', 'public.ecr.aws/clarif
 
 GIT_SHA = "b8ae56bf3b7c95e686ca002b07ca83d259c716eb"
 
+AMD_GIT_SHA = "81e942130173f54927e7c9a65aabc7e32780616d"
+
 PYTHON_BASE_IMAGE = registry + '/python-base:{python_version}-' + GIT_SHA
 TORCH_BASE_IMAGE = registry + '/torch:{torch_version}-py{python_version}-{gpu_version}-' + GIT_SHA
 
+AMD_PYTHON_BASE_IMAGE = registry + '/amd-python-base:{python_version}-' + AMD_GIT_SHA
+AMD_TORCH_BASE_IMAGE = (
+    registry + '/amd-torch:{torch_version}-py{python_version}-{gpu_version}-' + AMD_GIT_SHA
+)
+AMD_VLLM_BASE_IMAGE = (
+    registry + '/amd-vllm:{torch_version}-py{python_version}-{gpu_version}-' + AMD_GIT_SHA
+)
+
 # List of available python base images
 AVAILABLE_PYTHON_IMAGES = ['3.11', '3.12']
 
 DEFAULT_PYTHON_VERSION = 3.12
 
+DEFAULT_AMD_TORCH_VERSION = '2.8.0.dev20250511+rocm6.4'
+
+DEFAULT_AMD_GPU_VERSION = 'rocm6.4'
+
 # By default we download at runtime.
 DEFAULT_DOWNLOAD_CHECKPOINT_WHEN = "runtime"
 
@@ -29,6 +43,7 @@ AVAILABLE_TORCH_IMAGES = [
     '2.7.0-py3.12-cu128',
     '2.7.0-py3.12-rocm6.3',
 ]
+
 CONCEPTS_REQUIRED_MODEL_TYPE = [
     'visual-classifier',
     'visual-detector',
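
Note: the new AMD constants expand into image references via str.format. A sketch of the expansion using the defaults above (the registry default is truncated in the hunk header, so 'public.ecr.aws/clarifai' is an assumption here):

# How the AMD image references expand; the registry value is assumed.
registry = 'public.ecr.aws/clarifai'
AMD_GIT_SHA = "81e942130173f54927e7c9a65aabc7e32780616d"
AMD_VLLM_BASE_IMAGE = (
    registry + '/amd-vllm:{torch_version}-py{python_version}-{gpu_version}-' + AMD_GIT_SHA
)

print(AMD_VLLM_BASE_IMAGE.format(
    torch_version='2.8.0.dev20250511+rocm6.4',  # DEFAULT_AMD_TORCH_VERSION
    python_version=3.12,                        # DEFAULT_PYTHON_VERSION
    gpu_version='rocm6.4',                      # DEFAULT_AMD_GPU_VERSION
))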
clarifai/runners/utils/data_utils.py
CHANGED
@@ -1,3 +1,4 @@
+import json
 import math
 import operator
 from io import BytesIO
@@ -64,7 +65,7 @@ class Param(MessageData):
 
     def __init__(
         self,
-        default
+        default,
         description=None,
         min_value=None,
         max_value=None,
@@ -77,6 +78,7 @@ class Param(MessageData):
         self.max_value = max_value
         self.choices = choices
         self.is_param = is_param
+        self._patch_encoder()
 
     def __repr__(self) -> str:
         attrs = []
@@ -153,6 +155,16 @@ class Param(MessageData):
     def __ge__(self, other):
         return self.default >= other
 
+    def __getattribute__(self, name):
+        """Intercept attribute access to mimic default value behavior"""
+        try:
+            # First try to get Param attributes normally
+            return object.__getattribute__(self, name)
+        except AttributeError:
+            # Fall back to the default value's attributes
+            default = object.__getattribute__(self, 'default')
+            return getattr(default, name)
+
     # Arithmetic operators – # arithmetic & bitwise operators – auto-generated
     _arith_ops = {
         "__add__": operator.add,
@@ -169,7 +181,6 @@ class Param(MessageData):
         "__rshift__": operator.rshift,
     }
 
-    # Create both left- and right-hand versions of each operator
     for _name, _op in _arith_ops.items():
 
         def _make(op):
@@ -243,6 +254,24 @@ class Param(MessageData):
             return self
         return self.default
 
+    def __json__(self):
+        return self.default if not hasattr(self.default, '__json__') else self.default.__json__()
+
+    @classmethod
+    def _patch_encoder(cls):
+        # only patch once
+        if getattr(json.JSONEncoder, "_user_patched", False):
+            return
+        original = json.JSONEncoder.default
+
+        def default(self, obj):
+            if isinstance(obj, Param):
+                return obj.__json__()
+            return original(self, obj)
+
+        json.JSONEncoder.default = default
+        json.JSONEncoder._user_patched = True
+
     def to_proto(self, proto=None) -> ParamProto:
         if proto is None:
             proto = ParamProto()
@@ -254,7 +283,7 @@ class Param(MessageData):
             option = ModelTypeEnumOption(id=str(choice))
             proto.model_type_enum_options.append(option)
 
-        proto.required =
+        proto.required = False
 
         if self.min_value is not None or self.max_value is not None:
             range_info = ModelTypeRangeInfo()
@@ -324,8 +353,7 @@ class Param(MessageData):
 
         if proto is None:
             proto = ParamProto()
-
-        proto.default = json.dumps(default)
+        proto.default = json.dumps(default)
         return proto
     except Exception:
         if default is not None:
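
Note: _patch_encoder globally monkey-patches json.JSONEncoder.default so that json.dumps can serialize values that are still Param wrappers, emitting the wrapped default instead of raising TypeError. A self-contained toy replication of the pattern:

import json

class Param:
    """Toy stand-in for clarifai's Param; only the pieces the patch needs."""
    def __init__(self, default):
        self.default = default
        self._patch_encoder()

    def __json__(self):
        return self.default if not hasattr(self.default, '__json__') else self.default.__json__()

    @classmethod
    def _patch_encoder(cls):
        if getattr(json.JSONEncoder, "_user_patched", False):
            return  # only patch once
        original = json.JSONEncoder.default

        def default(self, obj):
            if isinstance(obj, Param):
                return obj.__json__()
            return original(self, obj)

        json.JSONEncoder.default = default
        json.JSONEncoder._user_patched = True

print(json.dumps({"temperature": Param(0.7)}))  # {"temperature": 0.7}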
clarifai/runners/utils/loader.py
CHANGED
@@ -41,7 +41,7 @@ class HuggingFaceLoader:
             return True
         except Exception as e:
             logger.error(
-                f"
+                f"Invalid Hugging Face token provided in the config file, this might cause issues with downloading the restricted model checkpoints. Failed reason: {e}"
             )
             return False
 
@@ -63,7 +63,6 @@ class HuggingFaceLoader:
         try:
             is_hf_model_exists = self.validate_hf_model()
             if not is_hf_model_exists:
-                logger.error("Model %s not found on Hugging Face" % (self.repo_id))
                 return False
 
             self.ignore_patterns = self._get_ignore_patterns()
@@ -205,6 +204,28 @@ class HuggingFaceLoader:
         ]
         return self.ignore_patterns
 
+    @classmethod
+    def validate_hf_repo_access(cls, repo_id: str, token: str = None) -> bool:
+        # check if model exists on HF
+        try:
+            from huggingface_hub import auth_check
+            from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError
+        except ImportError:
+            raise ImportError(cls.HF_DOWNLOAD_TEXT)
+
+        try:
+            auth_check(repo_id, token=token)
+            logger.info("Hugging Face repo access validated")
+            return True
+        except GatedRepoError:
+            logger.error(
+                "Hugging Face repo is gated. Please make sure you have access to the repo."
+            )
+            return False
+        except RepositoryNotFoundError:
+            logger.error("Hugging Face repo not found. Please make sure the repo exists.")
+            return False
+
     @staticmethod
     def validate_config(checkpoint_path: str):
         # check if downloaded config.json exists
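
Note: validate_hf_repo_access is a thin wrapper over huggingface_hub.auth_check, which raises typed errors for gated or missing repos. A standalone sketch of the same probe (the repo id is illustrative):

from huggingface_hub import auth_check
from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError

def has_repo_access(repo_id: str, token: str = None) -> bool:
    try:
        auth_check(repo_id, token=token)  # no exception means the repo is reachable
        return True
    except GatedRepoError:
        return False  # repo exists but this token has no access
    except RepositoryNotFoundError:
        return False  # repo id is wrong or the repo is private/hidden

print(has_repo_access("bert-base-uncased"))  # public repo -> True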
clarifai/runners/utils/method_signatures.py
CHANGED
@@ -302,6 +302,9 @@ def serialize(kwargs, signatures, proto=None, is_output=False):
                 raise TypeError(f'Missing required argument: {sig.name}')
             continue  # skip missing fields, they can be set to default on the server
         data = kwargs[sig.name]
+        default = data_utils.Param.get_default(sig)
+        if data is None and default is None:
+            continue
         serializer = serializer_from_signature(sig)
         # TODO determine if any (esp the first) var can go in the proto without parts
         # and whether to put this in the signature or dynamically determine it
@@ -312,7 +315,7 @@ def serialize(kwargs, signatures, proto=None, is_output=False):
     return proto
 
 
-def deserialize(proto, signatures, inference_params={}, is_output=False):
+def deserialize(proto, signatures, is_output=False):
     '''
     Deserialize the given proto into kwargs using the given signatures.
     '''
@@ -323,11 +326,8 @@ def deserialize(proto, signatures, inference_params={}, is_output=False):
     for sig_i, sig in enumerate(signatures):
         serializer = serializer_from_signature(sig)
         part = parts_by_name.get(sig.name)
-        inference_params_value = inference_params.get(sig.name)
         if part is not None:
             kwargs[sig.name] = serializer.deserialize(part.data)
-        elif inference_params_value is not None:
-            kwargs[sig.name] = inference_params_value
         else:
             if sig_i == 0:
                 # possible inlined first value
clarifai/utils/logging.py
CHANGED
@@ -10,6 +10,8 @@ import traceback
 from collections import defaultdict
 from typing import Any, Dict, List, Optional, Union
 
+from clarifai.errors import UserError
+
 # The default logger to use throughout the SDK is defined at bottom of this file.
 
 # For the json logger.
@@ -80,8 +82,13 @@ def get_req_id_from_context():
 
 def display_workflow_tree(nodes_data: List[Dict]) -> None:
     """Displays a tree of the workflow nodes."""
-
-
+    try:
+        from rich import print as rprint
+        from rich.tree import Tree
+    except ImportError:
+        raise UserError(
+            "Rich library is not installed. Please install it using pip install rich>=13.4.2"
+        )
 
     # Create a mapping of node_id to the list of node_ids that are connected to it.
     node_adj_mapping = defaultdict(list)
@@ -131,7 +138,12 @@ def display_workflow_tree(nodes_data: List[Dict]) -> None:
 
 def table_from_dict(data: List[Dict], column_names: List[str], title: str = "") -> 'rich.Table':  # noqa F821
     """Use this function for printing tables from a list of dicts."""
-
+    try:
+        from rich.table import Table
+    except ImportError:
+        raise UserError(
+            "Rich library is not installed. Please install it using pip install rich>=13.4.2"
+        )
 
     table = Table(title=title, show_lines=False, show_header=True, header_style="blue")
     for column_name in column_names:
@@ -233,8 +245,13 @@ def display_concept_relations_tree(relations_dict: Dict[str, Any]) -> None:
     Args:
         relations_dict (dict): A dict of concept relations info.
     """
-
-
+    try:
+        from rich import print as rprint
+        from rich.tree import Tree
+    except ImportError:
+        raise UserError(
+            "Rich library is not installed. Please install it using pip install rich>=13.4.2"
+        )
 
     for parent, children in relations_dict.items():
         tree = Tree(parent)
{clarifai-11.4.1.dist-info → clarifai-11.4.2.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: clarifai
-Version: 11.4.1
+Version: 11.4.2
 Home-page: https://github.com/Clarifai/clarifai-python
 Author: Clarifai
 Author-email: support@clarifai.com
@@ -23,7 +23,6 @@ Requires-Dist: clarifai-grpc>=11.3.4
 Requires-Dist: clarifai-protocol>=0.0.23
 Requires-Dist: numpy>=1.22.0
 Requires-Dist: tqdm>=4.65.0
-Requires-Dist: rich>=13.4.2
 Requires-Dist: PyYAML>=6.0.1
 Requires-Dist: schema==0.7.5
 Requires-Dist: Pillow>=9.5.0
{clarifai-11.4.1.dist-info → clarifai-11.4.2.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-clarifai/__init__.py,sha256=
+clarifai/__init__.py,sha256=pSlLOPfmtDJ3YrQTjGkuoViOvxNtsJ3x0m9xPOysJPQ,23
 clarifai/cli.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/errors.py,sha256=GXa6D4v_L404J83jnRNFPH7s-1V9lk7w6Ws99f1g-AY,2772
 clarifai/versions.py,sha256=ecSuEB_nOL2XSoYHDw2n23XUbm_KPOGjudMXmQrGdS8,224
@@ -14,7 +14,7 @@ clarifai/client/__init__.py,sha256=NhpNFRJY6mTi8ca-5hUeTEmYeDKHDNXY48FN63pDuos,7
 clarifai/client/app.py,sha256=LmIz06Tf1SVNd5SdLV5iqttEiY1x_8r5zX85MCfuXlo,41344
 clarifai/client/base.py,sha256=zOmB5HJP_-NmF2BPka14W7VUeJ1OF-fNxeacLsaRj3E,8775
 clarifai/client/compute_cluster.py,sha256=Tf4Svvx96pC2dMwdtpUsMiope-Nq941dpOViH4uDx5k,10218
-clarifai/client/dataset.py,sha256=
+clarifai/client/dataset.py,sha256=lsHrY2yxN_zfTGWzE_Gp7W_oi2G6BW2kPvAmKv6FyVE,35263
 clarifai/client/deployment.py,sha256=MQ3Kd8Ldp_JTC1hqhRqPiHe0RVUjb6iBY5DiA2Q7NxA,2839
 clarifai/client/input.py,sha256=Oswf3sp8PLs39jGIqfbJ89afh9GTCSsaQ--lYPtEtRo,51169
 clarifai/client/lister.py,sha256=1YEm2suNxPaJO4x9V5szgD_YX6N_00vgSO-7m0HagY8,2208
@@ -67,17 +67,19 @@ clarifai/runners/__init__.py,sha256=cDJ31l41dDsqW4Xn6sFMkKxxdIMTnGH9IW6sVkq0TNw,
 clarifai/runners/server.py,sha256=9qVAs8pRHmtyY0RCNIQ1uP8nqDADIFZ03LnkoDt1h4U,4692
 clarifai/runners/dockerfile_template/Dockerfile.template,sha256=5cjv7U8PmWa3DB_5B1CqSYh_6GE0E0np52TIAa7EIDE,2312
 clarifai/runners/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-clarifai/runners/models/model_builder.py,sha256=
-clarifai/runners/models/model_class.py,sha256=
-clarifai/runners/models/model_run_locally.py,sha256=
+clarifai/runners/models/model_builder.py,sha256=8Rj5nxDUa_jpDBQ04tD0hztenzXKcx2OEg4ce9ELY4I,48237
+clarifai/runners/models/model_class.py,sha256=OHVd0tMOXDyl9v1vWeHOmYGx_dvP77N4zlLGMyTakag,15575
+clarifai/runners/models/model_run_locally.py,sha256=6-6WjEKc0ba3gAv4wOLdMs2XOzS3b-2bZHJS0wdVqJY,20088
 clarifai/runners/models/model_runner.py,sha256=SccX-RxTgruSpQaM21uMSl-z1x6fOa13fQZMQW8NNRY,7297
 clarifai/runners/models/model_servicer.py,sha256=rRd_fNEXwqiBSzTUtPI2r07EBdcCPd8tcSPHeqTe0_I,3445
+clarifai/runners/models/visual_classifier_class.py,sha256=f9ZP8KFamMUdMpUG3AlL9nVCdcggy_E5n9RJY3ixR1U,2739
+clarifai/runners/models/visual_detector_class.py,sha256=ky4oFAkGCKPpGPdgaOso-n6D3HcmnbKee_8hBsNiV8U,2883
 clarifai/runners/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-clarifai/runners/utils/code_script.py,sha256=
-clarifai/runners/utils/const.py,sha256=
-clarifai/runners/utils/data_utils.py,sha256=
-clarifai/runners/utils/loader.py,sha256=
-clarifai/runners/utils/method_signatures.py,sha256=
+clarifai/runners/utils/code_script.py,sha256=M3S8c5XBMdY_VFsirLxdFRrC-PCqk1Tul0_qbm_89l4,10394
+clarifai/runners/utils/const.py,sha256=Q4Ps6gIEJCyTdQCfmT6PaS61WHmhT25XigV1NugWz-E,1544
+clarifai/runners/utils/data_utils.py,sha256=phrqojg3zhKDZxgXFdxL0NVLkuV0abbOLOSQPIGPPVI,19744
+clarifai/runners/utils/loader.py,sha256=K5Y8MPbIe5STw2gDnrL8KqFgKNxEo7bz-RV0ip1T4PM,10900
+clarifai/runners/utils/method_signatures.py,sha256=3EqrTLxynaBC4clj23iw9fZApFDQeapCzVlre6uybbI,19152
 clarifai/runners/utils/openai_convertor.py,sha256=QhbytqGU856gEFggaamZy01hhCwjiWz8ju48iubvbeI,5074
 clarifai/runners/utils/serializers.py,sha256=pI7GqMTC0T3Lu_X8v8TO4RiplO-gC_49Ns37jYwsPtg,7908
 clarifai/runners/utils/url_fetcher.py,sha256=Segkvi-ktPa3-koOpUu8DNZeWOaK6G82Ya9b7_oIKwo,1778
@@ -89,7 +91,7 @@ clarifai/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/utils/cli.py,sha256=7lHajIsWzyEU7jfgH1nykwYG63wcHCZ3ep7a6amWZH4,5413
 clarifai/utils/config.py,sha256=-mZwJEtv31pfchavgmiaNxRgwbcUhWpxJATH2hU8kwQ,4591
 clarifai/utils/constants.py,sha256=goxDnTuI6jtDhP2pWasMGcfd4FL3k6qUs2PE67EZOeM,1882
-clarifai/utils/logging.py,sha256=
+clarifai/utils/logging.py,sha256=0we53uTqUvzrulC86whu-oeWNxn1JjJL0OQ98Bwf9vo,15198
 clarifai/utils/misc.py,sha256=x7JP8oxU672Z9yAav47Y1anFiL4RD8WvlKBHMVlbyZM,3137
 clarifai/utils/model_train.py,sha256=0XSAoTkSsrwf4f-W9yw2mkXZtkal7LBLJSoi86CFCn4,9250
 clarifai/utils/protobuf.py,sha256=VMhnNsPuWQ16VarKm8BOr5zccXMe26UlrxdJxIzEZNM,6220
@@ -101,9 +103,9 @@ clarifai/workflows/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuF
 clarifai/workflows/export.py,sha256=Oq3RVNKvv1iH46U6oIjXa-MXWJ4sTlXr_NSfwoxr3H4,2149
 clarifai/workflows/utils.py,sha256=ESL3INcouNcLKCh-nMpfXX-YbtCzX7tz7hT57_RGQ3M,2079
 clarifai/workflows/validate.py,sha256=UhmukyHkfxiMFrPPeBdUTiCOHQT5-shqivlBYEyKTlU,2931
-clarifai-11.4.
-clarifai-11.4.
-clarifai-11.4.
-clarifai-11.4.
-clarifai-11.4.
-clarifai-11.4.
+clarifai-11.4.2.dist-info/licenses/LICENSE,sha256=mUqF_d12-qE2n41g7C5_sq-BMLOcj6CNN-jevr15YHU,555
+clarifai-11.4.2.dist-info/METADATA,sha256=8ezcxLu3AgPvciJRlpPHEXWoJCfmzrliSXPhTN9nwaM,22398
+clarifai-11.4.2.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
+clarifai-11.4.2.dist-info/entry_points.txt,sha256=X9FZ4Z-i_r2Ud1RpZ9sNIFYuu_-9fogzCMCRUD9hyX0,51
+clarifai-11.4.2.dist-info/top_level.txt,sha256=wUMdCQGjkxaynZ6nZ9FAnvBUCgp5RJUVFSy2j-KYo0s,9
+clarifai-11.4.2.dist-info/RECORD,,
{clarifai-11.4.1.dist-info → clarifai-11.4.2.dist-info}/entry_points.txt
File without changes
{clarifai-11.4.1.dist-info → clarifai-11.4.2.dist-info}/licenses/LICENSE
File without changes
{clarifai-11.4.1.dist-info → clarifai-11.4.2.dist-info}/top_level.txt
File without changes