clarifai 11.4.0.tar.gz → 11.4.2.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (131)
  1. {clarifai-11.4.0/clarifai.egg-info → clarifai-11.4.2}/PKG-INFO +1 -2
  2. clarifai-11.4.2/clarifai/__init__.py +1 -0
  3. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/client/dataset.py +6 -0
  4. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/runners/models/model_builder.py +140 -36
  5. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/runners/models/model_class.py +5 -22
  6. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/runners/models/model_run_locally.py +0 -4
  7. clarifai-11.4.2/clarifai/runners/models/visual_classifier_class.py +75 -0
  8. clarifai-11.4.2/clarifai/runners/models/visual_detector_class.py +79 -0
  9. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/runners/utils/code_script.py +41 -44
  10. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/runners/utils/const.py +15 -0
  11. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/runners/utils/data_utils.py +195 -15
  12. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/runners/utils/loader.py +23 -2
  13. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/runners/utils/method_signatures.py +6 -6
  14. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/utils/logging.py +22 -5
  15. {clarifai-11.4.0 → clarifai-11.4.2/clarifai.egg-info}/PKG-INFO +1 -2
  16. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai.egg-info/SOURCES.txt +2 -0
  17. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai.egg-info/requires.txt +0 -1
  18. {clarifai-11.4.0 → clarifai-11.4.2}/requirements.txt +0 -1
  19. clarifai-11.4.0/clarifai/__init__.py +0 -1
  20. {clarifai-11.4.0 → clarifai-11.4.2}/LICENSE +0 -0
  21. {clarifai-11.4.0 → clarifai-11.4.2}/MANIFEST.in +0 -0
  22. {clarifai-11.4.0 → clarifai-11.4.2}/README.md +0 -0
  23. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/cli/README.md +0 -0
  24. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/cli/__init__.py +0 -0
  25. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/cli/__main__.py +0 -0
  26. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/cli/base.py +0 -0
  27. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/cli/compute_cluster.py +0 -0
  28. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/cli/deployment.py +0 -0
  29. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/cli/model.py +0 -0
  30. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/cli/nodepool.py +0 -0
  31. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/cli.py +0 -0
  32. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/client/__init__.py +0 -0
  33. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/client/app.py +0 -0
  34. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/client/auth/__init__.py +0 -0
  35. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/client/auth/helper.py +0 -0
  36. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/client/auth/register.py +0 -0
  37. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/client/auth/stub.py +0 -0
  38. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/client/base.py +0 -0
  39. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/client/compute_cluster.py +0 -0
  40. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/client/deployment.py +0 -0
  41. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/client/input.py +0 -0
  42. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/client/lister.py +0 -0
  43. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/client/model.py +0 -0
  44. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/client/model_client.py +0 -0
  45. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/client/module.py +0 -0
  46. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/client/nodepool.py +0 -0
  47. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/client/runner.py +0 -0
  48. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/client/search.py +0 -0
  49. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/client/user.py +0 -0
  50. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/client/workflow.py +0 -0
  51. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/constants/base.py +0 -0
  52. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/constants/dataset.py +0 -0
  53. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/constants/input.py +0 -0
  54. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/constants/model.py +0 -0
  55. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/constants/rag.py +0 -0
  56. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/constants/search.py +0 -0
  57. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/constants/workflow.py +0 -0
  58. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/datasets/__init__.py +0 -0
  59. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/datasets/export/__init__.py +0 -0
  60. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/datasets/export/inputs_annotations.py +0 -0
  61. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/datasets/upload/__init__.py +0 -0
  62. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/datasets/upload/base.py +0 -0
  63. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/datasets/upload/features.py +0 -0
  64. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/datasets/upload/image.py +0 -0
  65. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/datasets/upload/loaders/README.md +0 -0
  66. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/datasets/upload/loaders/__init__.py +0 -0
  67. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/datasets/upload/loaders/coco_captions.py +0 -0
  68. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/datasets/upload/loaders/coco_detection.py +0 -0
  69. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/datasets/upload/loaders/imagenet_classification.py +0 -0
  70. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/datasets/upload/loaders/xview_detection.py +0 -0
  71. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/datasets/upload/multimodal.py +0 -0
  72. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/datasets/upload/text.py +0 -0
  73. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/datasets/upload/utils.py +0 -0
  74. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/errors.py +0 -0
  75. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/models/__init__.py +0 -0
  76. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/models/api.py +0 -0
  77. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/modules/README.md +0 -0
  78. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/modules/__init__.py +0 -0
  79. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/modules/css.py +0 -0
  80. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/modules/pages.py +0 -0
  81. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/modules/style.css +0 -0
  82. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/rag/__init__.py +0 -0
  83. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/rag/rag.py +0 -0
  84. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/rag/utils.py +0 -0
  85. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/runners/__init__.py +0 -0
  86. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/runners/dockerfile_template/Dockerfile.template +0 -0
  87. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/runners/models/__init__.py +0 -0
  88. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/runners/models/model_runner.py +0 -0
  89. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/runners/models/model_servicer.py +0 -0
  90. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/runners/server.py +0 -0
  91. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/runners/utils/__init__.py +0 -0
  92. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/runners/utils/data_types/__init__.py +0 -0
  93. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/runners/utils/data_types/data_types.py +0 -0
  94. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/runners/utils/openai_convertor.py +0 -0
  95. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/runners/utils/serializers.py +0 -0
  96. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/runners/utils/url_fetcher.py +0 -0
  97. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/schema/search.py +0 -0
  98. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/urls/helper.py +0 -0
  99. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/utils/__init__.py +0 -0
  100. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/utils/cli.py +0 -0
  101. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/utils/config.py +0 -0
  102. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/utils/constants.py +0 -0
  103. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/utils/evaluation/__init__.py +0 -0
  104. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/utils/evaluation/helpers.py +0 -0
  105. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/utils/evaluation/main.py +0 -0
  106. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/utils/evaluation/testset_annotation_parser.py +0 -0
  107. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/utils/misc.py +0 -0
  108. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/utils/model_train.py +0 -0
  109. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/utils/protobuf.py +0 -0
  110. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/versions.py +0 -0
  111. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/workflows/__init__.py +0 -0
  112. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/workflows/export.py +0 -0
  113. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/workflows/utils.py +0 -0
  114. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai/workflows/validate.py +0 -0
  115. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai.egg-info/dependency_links.txt +0 -0
  116. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai.egg-info/entry_points.txt +0 -0
  117. {clarifai-11.4.0 → clarifai-11.4.2}/clarifai.egg-info/top_level.txt +0 -0
  118. {clarifai-11.4.0 → clarifai-11.4.2}/pyproject.toml +0 -0
  119. {clarifai-11.4.0 → clarifai-11.4.2}/setup.cfg +0 -0
  120. {clarifai-11.4.0 → clarifai-11.4.2}/setup.py +0 -0
  121. {clarifai-11.4.0 → clarifai-11.4.2}/tests/test_app.py +0 -0
  122. {clarifai-11.4.0 → clarifai-11.4.2}/tests/test_auth.py +0 -0
  123. {clarifai-11.4.0 → clarifai-11.4.2}/tests/test_data_upload.py +0 -0
  124. {clarifai-11.4.0 → clarifai-11.4.2}/tests/test_eval.py +0 -0
  125. {clarifai-11.4.0 → clarifai-11.4.2}/tests/test_misc.py +0 -0
  126. {clarifai-11.4.0 → clarifai-11.4.2}/tests/test_model_predict.py +0 -0
  127. {clarifai-11.4.0 → clarifai-11.4.2}/tests/test_model_train.py +0 -0
  128. {clarifai-11.4.0 → clarifai-11.4.2}/tests/test_modules.py +0 -0
  129. {clarifai-11.4.0 → clarifai-11.4.2}/tests/test_rag.py +0 -0
  130. {clarifai-11.4.0 → clarifai-11.4.2}/tests/test_search.py +0 -0
  131. {clarifai-11.4.0 → clarifai-11.4.2}/tests/test_stub.py +0 -0
--- clarifai-11.4.0/clarifai.egg-info/PKG-INFO
+++ clarifai-11.4.2/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: clarifai
-Version: 11.4.0
+Version: 11.4.2
 Home-page: https://github.com/Clarifai/clarifai-python
 Author: Clarifai
 Author-email: support@clarifai.com
@@ -23,7 +23,6 @@ Requires-Dist: clarifai-grpc>=11.3.4
 Requires-Dist: clarifai-protocol>=0.0.23
 Requires-Dist: numpy>=1.22.0
 Requires-Dist: tqdm>=4.65.0
-Requires-Dist: rich>=13.4.2
 Requires-Dist: PyYAML>=6.0.1
 Requires-Dist: schema==0.7.5
 Requires-Dist: Pillow>=9.5.0
--- /dev/null
+++ clarifai-11.4.2/clarifai/__init__.py
@@ -0,0 +1 @@
+__version__ = "11.4.2"
--- clarifai-11.4.0/clarifai/client/dataset.py
+++ clarifai-11.4.2/clarifai/client/dataset.py
@@ -685,6 +685,12 @@ class Dataset(Lister, BaseClient):
         Note:
             This is a beta feature and is subject to change.
         """
+        try:
+            import rich  # noqa: F401
+        except ImportError:
+            raise UserError(
+                "Rich library is not installed. Please install it using pip install rich>=13.4.2"
+            )
         self.logger.info("Getting dataset upload status...")
         dataset_version_id = uuid.uuid4().hex
         _ = self.create_version(id=dataset_version_id, description="SDK Upload Status")
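Note: rich>=13.4.2 was dropped from the hard requirements (see the requires.txt hunk above) and is now imported lazily at the call site, so it only needs to be installed when the beta upload-status display is actually used. A minimal sketch of the same optional-dependency guard, reusable for other extras (helper name is illustrative):

    def require_rich() -> None:
        """Fail fast with an actionable message when an optional extra is missing."""
        try:
            import rich  # noqa: F401  # availability check only; not used here
        except ImportError:
            raise RuntimeError("rich is not installed; run: pip install 'rich>=13.4.2'")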
--- clarifai-11.4.0/clarifai/runners/models/model_builder.py
+++ clarifai-11.4.2/clarifai/runners/models/model_builder.py
@@ -14,15 +14,17 @@ import yaml
 from clarifai_grpc.grpc.api import resources_pb2, service_pb2
 from clarifai_grpc.grpc.api.status import status_code_pb2
 from google.protobuf import json_format
-from rich import print
-from rich.markup import escape
 
 from clarifai.client.base import BaseClient
 from clarifai.runners.models.model_class import ModelClass
 from clarifai.runners.utils.const import (
+    AMD_PYTHON_BASE_IMAGE,
+    AMD_VLLM_BASE_IMAGE,
     AVAILABLE_PYTHON_IMAGES,
     AVAILABLE_TORCH_IMAGES,
     CONCEPTS_REQUIRED_MODEL_TYPE,
+    DEFAULT_AMD_GPU_VERSION,
+    DEFAULT_AMD_TORCH_VERSION,
     DEFAULT_DOWNLOAD_CHECKPOINT_WHEN,
     DEFAULT_PYTHON_VERSION,
     DEFAULT_RUNTIME_DOWNLOAD_PATH,
@@ -43,13 +45,6 @@ dependencies = [
 ]
 
 
-def _clear_line(n: int = 1) -> None:
-    LINE_UP = '\033[1A'  # Move cursor up one line
-    LINE_CLEAR = '\x1b[2K'  # Clear the entire line
-    for _ in range(n):
-        print(LINE_UP, end=LINE_CLEAR, flush=True)
-
-
 def is_related(object_class, main_class):
     # Check if the object_class is a subclass of main_class
     if issubclass(object_class, main_class):
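The removed _clear_line helper redrew progress output with raw ANSI escape codes; the progress prints later in this file now overwrite a single line with a carriage return instead (see the monitor hunks below). A standalone sketch of both idioms:

    import time

    # Old approach (removed): ANSI codes to move the cursor up and clear the line.
    LINE_UP, LINE_CLEAR = '\033[1A', '\x1b[2K'

    # New approach: end='\r' makes the next print overwrite the same line.
    for pct in (0, 25, 50, 75, 100):
        print(f"Model is building... {pct}%", end='\r', flush=True)
        time.sleep(0.1)
    print()  # advance past the progress line when finished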
@@ -361,13 +356,23 @@ class ModelBuilder:
         if self.config.get("checkpoints"):
             loader_type, _, hf_token, _, _, _ = self._validate_config_checkpoints()
 
-            if loader_type == "huggingface" and hf_token:
-                is_valid_token = HuggingFaceLoader.validate_hftoken(hf_token)
-                if not is_valid_token:
+            if loader_type == "huggingface":
+                is_valid_token = hf_token and HuggingFaceLoader.validate_hftoken(hf_token)
+                if not is_valid_token and hf_token:
+                    logger.info(
+                        "Continuing without Hugging Face token for validating config in model builder."
+                    )
+
+                has_repo_access = HuggingFaceLoader.validate_hf_repo_access(
+                    repo_id=self.config.get("checkpoints", {}).get("repo_id"),
+                    token=hf_token if is_valid_token else None,
+                )
+
+                if not has_repo_access:
                     logger.error(
-                        "Invalid Hugging Face token provided in the config file, this might cause issues with downloading the restricted model checkpoints."
+                        f"Invalid Hugging Face repo access for repo {self.config.get('checkpoints').get('repo_id')}. Please check your repo and try again."
                     )
-                    logger.info("Continuing without Hugging Face token")
+                    sys.exit("Token does not have access to HuggingFace repo , exiting.")
 
         num_threads = self.config.get("num_threads")
         if num_threads or num_threads == 0:
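Upload now fails fast when the token cannot read the checkpoint repo, rather than warning and failing later mid-download. The same pre-flight check can be run standalone; validate_hftoken and validate_hf_repo_access are the calls from the hunk above, while the token and repo id here are placeholders:

    from clarifai.runners.utils.loader import HuggingFaceLoader

    token = None  # or "hf_..." for gated/private repos
    is_valid = bool(token) and HuggingFaceLoader.validate_hftoken(token)
    if not HuggingFaceLoader.validate_hf_repo_access(
        repo_id="some-org/some-model",  # placeholder repo id
        token=token if is_valid else None,
    ):
        raise SystemExit("token does not have access to the Hugging Face repo")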
@@ -532,6 +537,30 @@ class ModelBuilder:
             dependencies_version[dependency] = version if version else None
         return dependencies_version
 
+    def _is_amd(self):
+        """
+        Check if the model is AMD or not.
+        """
+        is_amd_gpu = False
+        is_nvidia_gpu = False
+        if "inference_compute_info" in self.config:
+            inference_compute_info = self.config.get('inference_compute_info')
+            if 'accelerator_type' in inference_compute_info:
+                for accelerator in inference_compute_info['accelerator_type']:
+                    if 'amd' in accelerator.lower():
+                        is_amd_gpu = True
+                    elif 'nvidia' in accelerator.lower():
+                        is_nvidia_gpu = True
+        if is_amd_gpu and is_nvidia_gpu:
+            raise Exception(
+                "Both AMD and NVIDIA GPUs are specified in the config file, please use only one type of GPU."
+            )
+        if is_amd_gpu:
+            logger.info("Using AMD base image to build the Docker image and upload the model")
+        elif is_nvidia_gpu:
+            logger.info("Using NVIDIA base image to build the Docker image and upload the model")
+        return is_amd_gpu
+
     def create_dockerfile(self):
         dockerfile_template = os.path.join(
             os.path.dirname(os.path.dirname(__file__)),
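_is_amd keys off inference_compute_info.accelerator_type in config.yaml. Expressed as the parsed-config dict the builder sees (the accelerator name is illustrative), this selects the AMD image path:

    config = {
        "inference_compute_info": {
            "num_accelerators": 1,
            # Any entry containing 'amd' selects the AMD images; mixing
            # 'amd' and 'nvidia' entries raises an Exception.
            "accelerator_type": ["AMD-MI300X"],
        }
    }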
@@ -562,30 +591,85 @@ class ModelBuilder:
             )
             python_version = DEFAULT_PYTHON_VERSION
 
-        # This is always the final image used for runtime.
-        final_image = PYTHON_BASE_IMAGE.format(python_version=python_version)
-        downloader_image = PYTHON_BASE_IMAGE.format(python_version=python_version)
-
         # Parse the requirements.txt file to determine the base image
         dependencies = self._parse_requirements()
-        if 'torch' in dependencies and dependencies['torch']:
-            torch_version = dependencies['torch']
-
-            # Sort in reverse so that newer cuda versions come first and are preferred.
-            for image in sorted(AVAILABLE_TORCH_IMAGES, reverse=True):
-                if torch_version in image and f'py{python_version}' in image:
-                    # like cu124, rocm6.3, etc.
-                    gpu_version = image.split('-')[-1]
-                    final_image = TORCH_BASE_IMAGE.format(
-                        torch_version=torch_version,
-                        python_version=python_version,
-                        gpu_version=gpu_version,
+
+        is_amd_gpu = self._is_amd()
+        if is_amd_gpu:
+            final_image = AMD_PYTHON_BASE_IMAGE.format(python_version=python_version)
+            downloader_image = AMD_PYTHON_BASE_IMAGE.format(python_version=python_version)
+            if 'vllm' in dependencies:
+                if python_version != DEFAULT_PYTHON_VERSION:
+                    raise Exception(
+                        f"vLLM is not supported with Python version {python_version}, please use Python version {DEFAULT_PYTHON_VERSION} in your config.yaml"
+                    )
+                torch_version = dependencies.get('torch', None)
+                if 'torch' in dependencies:
+                    if python_version != DEFAULT_PYTHON_VERSION:
+                        raise Exception(
+                            f"torch is not supported with Python version {python_version}, please use Python version {DEFAULT_PYTHON_VERSION} in your config.yaml"
+                        )
+                    if not torch_version:
+                        logger.info(
+                            f"torch version not found in requirements.txt, using the default version {DEFAULT_AMD_TORCH_VERSION}"
+                        )
+                        torch_version = DEFAULT_AMD_TORCH_VERSION
+                    if torch_version not in [DEFAULT_AMD_TORCH_VERSION]:
+                        raise Exception(
+                            f"torch version {torch_version} not supported, please use one of the following versions: {DEFAULT_AMD_TORCH_VERSION} in your requirements.txt"
+                        )
+                python_version = DEFAULT_PYTHON_VERSION
+                gpu_version = DEFAULT_AMD_GPU_VERSION
+                final_image = AMD_VLLM_BASE_IMAGE.format(
+                    torch_version=torch_version,
+                    python_version=python_version,
+                    gpu_version=gpu_version,
+                )
+                logger.info("Using vLLM base image to build the Docker image")
+            elif 'torch' in dependencies:
+                torch_version = dependencies['torch']
+                if python_version != DEFAULT_PYTHON_VERSION:
+                    raise Exception(
+                        f"torch is not supported with Python version {python_version}, please use Python version {DEFAULT_PYTHON_VERSION} in your config.yaml"
                     )
+                if not torch_version:
                     logger.info(
-                        f"Using Torch version {torch_version} base image to build the Docker image"
+                        f"torch version not found in requirements.txt, using the default version {DEFAULT_AMD_TORCH_VERSION}"
                     )
-                    break
-
+                    torch_version = DEFAULT_AMD_TORCH_VERSION
+                if torch_version not in [DEFAULT_AMD_TORCH_VERSION]:
+                    raise Exception(
+                        f"torch version {torch_version} not supported, please use one of the following versions: {DEFAULT_AMD_TORCH_VERSION} in your requirements.txt"
+                    )
+                python_version = DEFAULT_PYTHON_VERSION
+                gpu_version = DEFAULT_AMD_GPU_VERSION
+                final_image = TORCH_BASE_IMAGE.format(
+                    torch_version=torch_version,
+                    python_version=python_version,
+                    gpu_version=gpu_version,
+                )
+                logger.info(
+                    f"Using Torch version {torch_version} base image to build the Docker image"
+                )
+        else:
+            final_image = PYTHON_BASE_IMAGE.format(python_version=python_version)
+            downloader_image = PYTHON_BASE_IMAGE.format(python_version=python_version)
+            if 'torch' in dependencies and dependencies['torch']:
+                torch_version = dependencies['torch']
+                # Sort in reverse so that newer cuda versions come first and are preferred.
+                for image in sorted(AVAILABLE_TORCH_IMAGES, reverse=True):
+                    if torch_version in image and f'py{python_version}' in image:
+                        # like cu124, rocm6.3, etc.
+                        gpu_version = image.split('-')[-1]
+                        final_image = TORCH_BASE_IMAGE.format(
+                            torch_version=torch_version,
+                            python_version=python_version,
+                            gpu_version=gpu_version,
+                        )
+                        logger.info(
+                            f"Using Torch version {torch_version} base image to build the Docker image"
+                        )
+                        break
         if 'clarifai' not in dependencies:
             raise Exception(
                 f"clarifai not found in requirements.txt, please add clarifai to the requirements.txt file with a fixed version. Current version is clarifai=={CLIENT_VERSION}"
@@ -835,7 +919,6 @@ class ModelBuilder:
             percent_completed = response.status.percent_completed
             details = response.status.details
 
-            _clear_line()
             print(
                 f"Status: {response.status.description}, Progress: {percent_completed}% - {details} ",
                 f"request_id: {response.status.req_id}",
@@ -849,7 +932,23 @@ class ModelBuilder:
         logger.info(f"Created Model Version ID: {self.model_version_id}")
         logger.info(f"Full url to that version is: {self.model_url}")
         try:
-            self.monitor_model_build()
+            is_uploaded = self.monitor_model_build()
+            if is_uploaded:
+                from clarifai.runners.utils import code_script
+
+                method_signatures = self.get_method_signatures()
+                snippet = code_script.generate_client_script(
+                    method_signatures,
+                    user_id=self.client.user_app_id.user_id,
+                    app_id=self.client.user_app_id.app_id,
+                    model_id=self.model_proto.id,
+                )
+                logger.info("""\n
+XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+# Here is a code snippet to call this model:
+XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+""")
+                logger.info(snippet)
         finally:
             if os.path.exists(self.tar_file):
                 logger.debug(f"Cleaning up upload file: {self.tar_file}")
@@ -933,7 +1032,12 @@ class ModelBuilder:
                 for log_entry in logs.log_entries:
                     if log_entry.url not in seen_logs:
                         seen_logs.add(log_entry.url)
-                        logger.info(f"{escape(log_entry.message.strip())}")
+                        log_entry_msg = re.sub(
+                            r"(\\*)(\[[a-z#/@][^[]*?])",
+                            lambda m: f"{m.group(1)}{m.group(1)}\\{m.group(2)}",
+                            log_entry.message.strip(),
+                        )
+                        logger.info(log_entry_msg)
             if status_code == status_code_pb2.MODEL_BUILDING:
                 print(
                     f"Model is building... (elapsed {time.time() - st:.1f}s)", end='\r', flush=True
--- clarifai-11.4.0/clarifai/runners/models/model_class.py
+++ clarifai-11.4.2/clarifai/runners/models/model_class.py
@@ -9,7 +9,6 @@ from typing import Any, Dict, Iterator, List
 
 from clarifai_grpc.grpc.api import resources_pb2, service_pb2
 from clarifai_grpc.grpc.api.status import status_code_pb2, status_pb2
-from google.protobuf import json_format
 
 from clarifai.runners.utils import data_types
 from clarifai.runners.utils.data_utils import DataConverter
@@ -100,7 +99,6 @@ class ModelClass(ABC):
         try:
             # TODO add method name field to proto
             method_name = 'predict'
-            inference_params = get_inference_params(request)
             if len(request.inputs) > 0 and '_method_name' in request.inputs[0].data.metadata:
                 method_name = request.inputs[0].data.metadata['_method_name']
             if (
@@ -124,7 +122,7 @@ class ModelClass(ABC):
                 input.data.CopyFrom(new_data)
             # convert inputs to python types
             inputs = self._convert_input_protos_to_python(
-                request.inputs, inference_params, signature.input_fields, python_param_types
+                request.inputs, signature.input_fields, python_param_types
             )
             if len(inputs) == 1:
                 inputs = inputs[0]
@@ -163,7 +161,6 @@ class ModelClass(ABC):
     ) -> Iterator[service_pb2.MultiOutputResponse]:
         try:
             method_name = 'generate'
-            inference_params = get_inference_params(request)
             if len(request.inputs) > 0 and '_method_name' in request.inputs[0].data.metadata:
                 method_name = request.inputs[0].data.metadata['_method_name']
             method = getattr(self, method_name)
@@ -180,7 +177,7 @@ class ModelClass(ABC):
                 )
                 input.data.CopyFrom(new_data)
             inputs = self._convert_input_protos_to_python(
-                request.inputs, inference_params, signature.input_fields, python_param_types
+                request.inputs, signature.input_fields, python_param_types
             )
             if len(inputs) == 1:
                 inputs = inputs[0]
@@ -226,7 +223,6 @@ class ModelClass(ABC):
         assert len(request.inputs) == 1, "Streaming requires exactly one input"
 
         method_name = 'stream'
-        inference_params = get_inference_params(request)
         if len(request.inputs) > 0 and '_method_name' in request.inputs[0].data.metadata:
             method_name = request.inputs[0].data.metadata['_method_name']
         method = getattr(self, method_name)
@@ -251,7 +247,7 @@ class ModelClass(ABC):
                 input.data.CopyFrom(new_data)
             # convert all inputs for the first request, including the first stream value
             inputs = self._convert_input_protos_to_python(
-                request.inputs, inference_params, signature.input_fields, python_param_types
+                request.inputs, signature.input_fields, python_param_types
             )
             kwargs = inputs[0]
 
@@ -264,7 +260,7 @@ class ModelClass(ABC):
             # subsequent streaming items contain only the streaming input
             for request in request_iterator:
                 item = self._convert_input_protos_to_python(
-                    request.inputs, inference_params, [stream_sig], python_param_types
+                    request.inputs, [stream_sig], python_param_types
                 )
                 item = item[0][stream_argname]
                 yield item
@@ -297,13 +293,12 @@ class ModelClass(ABC):
     def _convert_input_protos_to_python(
         self,
         inputs: List[resources_pb2.Input],
-        inference_params: dict,
         variables_signature: List[resources_pb2.ModelTypeField],
         python_param_types,
     ) -> List[Dict[str, Any]]:
         result = []
         for input in inputs:
-            kwargs = deserialize(input.data, variables_signature, inference_params)
+            kwargs = deserialize(input.data, variables_signature)
             # dynamic cast to annotated types
             for k, v in kwargs.items():
                 if k not in python_param_types:
@@ -374,18 +369,6 @@ class ModelClass(ABC):
         return method_info
 
 
-# Helper function to get the inference params
-def get_inference_params(request) -> dict:
-    """Get the inference params from the request."""
-    inference_params = {}
-    if request.model.model_version.id != "":
-        output_info = request.model.model_version.output_info
-        output_info = json_format.MessageToDict(output_info, preserving_proto_field_name=True)
-        if "params" in output_info:
-            inference_params = output_info["params"]
-    return inference_params
-
-
 class _MethodInfo:
     def __init__(self, method):
         self.name = method.__name__
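With get_inference_params gone, nothing is merged in from model_version.output_info.params anymore; every argument a method receives is deserialized from the request inputs against the method's typed signature. A minimal subclass sketch of that calling convention (model logic illustrative, assuming the @ModelClass.method decorator):

    from clarifai.runners.models.model_class import ModelClass

    class MyModel(ModelClass):
        @ModelClass.method
        def predict(self, prompt: str, temperature: float = 0.7) -> str:
            # 'temperature' is a plain typed parameter with a default; there is
            # no separate inference_params dict injected behind the scenes.
            return f"temp={temperature}: {prompt}"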
--- clarifai-11.4.0/clarifai/runners/models/model_run_locally.py
+++ clarifai-11.4.2/clarifai/runners/models/model_run_locally.py
@@ -442,10 +442,6 @@ def main(
     manager = ModelRunLocally(model_path)
     # get whatever stage is in config.yaml to force download now
    # also always write to where upload/build wants to, not the /tmp folder that runtime stage uses
-    _, _, _, when, _, _ = manager.builder._validate_config_checkpoints()
-    manager.builder.download_checkpoints(
-        stage=when, checkpoint_path_override=manager.builder.checkpoint_path
-    )
     if inside_container:
         if not manager.is_docker_installed():
             sys.exit(1)
--- /dev/null
+++ clarifai-11.4.2/clarifai/runners/models/visual_classifier_class.py
@@ -0,0 +1,75 @@
+import os
+import tempfile
+from io import BytesIO
+from typing import Dict, Iterator, List
+
+import cv2
+import torch
+from PIL import Image as PILImage
+
+from clarifai.runners.models.model_class import ModelClass
+from clarifai.runners.utils.data_types import Concept, Frame, Image
+from clarifai.utils.logging import logger
+
+
+class VisualClassifierClass(ModelClass):
+    """Base class for visual classification models supporting image and video processing."""
+
+    @staticmethod
+    def preprocess_image(image_bytes: bytes) -> PILImage:
+        """Convert image bytes to PIL Image."""
+        return PILImage.open(BytesIO(image_bytes)).convert("RGB")
+
+    @staticmethod
+    def video_to_frames(video_bytes: bytes) -> Iterator[Frame]:
+        """Convert video bytes to frames.
+
+        Args:
+            video_bytes: Raw video data in bytes
+
+        Yields:
+            Frame with JPEG encoded frame data as bytes and timestamp in milliseconds
+        """
+        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as temp_video_file:
+            temp_video_file.write(video_bytes)
+            temp_video_path = temp_video_file.name
+            logger.debug(f"temp_video_path: {temp_video_path}")
+
+            video = cv2.VideoCapture(temp_video_path)
+            logger.debug(f"video opened: {video.isOpened()}")
+
+            while video.isOpened():
+                ret, frame = video.read()
+                if not ret:
+                    break
+                # Get frame timestamp in milliseconds
+                timestamp_ms = video.get(cv2.CAP_PROP_POS_MSEC)
+                frame_bytes = cv2.imencode('.jpg', frame)[1].tobytes()
+                yield Frame(image=Image(bytes=frame_bytes), time=timestamp_ms)
+
+            video.release()
+            os.unlink(temp_video_path)
+
+    @staticmethod
+    def process_concepts(
+        logits: torch.Tensor, threshold: float, model_labels: Dict[int, str]
+    ) -> List[List[Concept]]:
+        """Convert model logits into a structured format of concepts.
+
+        Args:
+            logits: Model output logits as a tensor (batch_size x num_classes)
+            model_labels: Dictionary mapping label indices to label names
+
+        Returns:
+            List of lists containing Concept objects for each input in the batch
+        """
+        outputs = []
+        for logit in logits:
+            probs = torch.softmax(logit, dim=-1)
+            sorted_indices = torch.argsort(probs, dim=-1, descending=True)
+            output_concepts = []
+            for idx in sorted_indices:
+                concept = Concept(name=model_labels[idx.item()], value=probs[idx].item())
+                output_concepts.append(concept)
+            outputs.append(output_concepts)
+        return outputs
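A subclass sketch showing how the new helpers compose; the torchvision backbone, label map, and @VisualClassifierClass.method decorator usage (inherited from ModelClass) are illustrative, not part of the SDK:

    from typing import List

    import torch
    from torchvision.models import ResNet50_Weights, resnet50

    from clarifai.runners.models.visual_classifier_class import VisualClassifierClass
    from clarifai.runners.utils.data_types import Concept, Image

    class MyClassifier(VisualClassifierClass):
        def load_model(self):
            weights = ResNet50_Weights.DEFAULT
            self.model = resnet50(weights=weights).eval()
            self.transform = weights.transforms()
            self.labels = dict(enumerate(weights.meta["categories"]))

        @VisualClassifierClass.method
        def predict(self, image: Image) -> List[Concept]:
            pil = self.preprocess_image(image.bytes)
            with torch.no_grad():
                logits = self.model(self.transform(pil).unsqueeze(0))
            # process_concepts returns one concept list per batch row.
            return self.process_concepts(logits, 0.0, self.labels)[0]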
--- /dev/null
+++ clarifai-11.4.2/clarifai/runners/models/visual_detector_class.py
@@ -0,0 +1,79 @@
+import os
+import tempfile
+from io import BytesIO
+from typing import Dict, Iterator, List
+
+import cv2
+import torch
+from PIL import Image as PILImage
+
+from clarifai.runners.models.model_class import ModelClass
+from clarifai.runners.utils.data_types import Concept, Frame, Image, Region
+from clarifai.utils.logging import logger
+
+
+class VisualDetectorClass(ModelClass):
+    """Base class for visual detection models supporting image and video processing."""
+
+    @staticmethod
+    def preprocess_image(image_bytes: bytes) -> PILImage:
+        """Convert image bytes to PIL Image."""
+        return PILImage.open(BytesIO(image_bytes)).convert("RGB")
+
+    @staticmethod
+    def video_to_frames(video_bytes: bytes) -> Iterator[Frame]:
+        """Convert video bytes to frames.
+
+        Args:
+            video_bytes: Raw video data in bytes
+
+        Yields:
+            Frame with JPEG encoded frame data as bytes and timestamp in milliseconds
+        """
+        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as temp_video_file:
+            temp_video_file.write(video_bytes)
+            temp_video_path = temp_video_file.name
+            logger.debug(f"temp_video_path: {temp_video_path}")
+
+            video = cv2.VideoCapture(temp_video_path)
+            logger.debug(f"video opened: {video.isOpened()}")
+
+            while video.isOpened():
+                ret, frame = video.read()
+                if not ret:
+                    break
+                # Get frame timestamp in milliseconds
+                timestamp_ms = video.get(cv2.CAP_PROP_POS_MSEC)
+                frame_bytes = cv2.imencode('.jpg', frame)[1].tobytes()
+                yield Frame(image=Image(bytes=frame_bytes), time=timestamp_ms)
+
+            video.release()
+            os.unlink(temp_video_path)
+
+    @staticmethod
+    def process_detections(
+        results: List[Dict[str, torch.Tensor]], threshold: float, model_labels: Dict[int, str]
+    ) -> List[List[Region]]:
+        """Convert model outputs into a structured format of detections.
+
+        Args:
+            results: Raw detection results from model
+            threshold: Confidence threshold for detections
+            model_labels: Dictionary mapping label indices to names
+
+        Returns:
+            List of lists containing Region objects for each detection
+        """
+        outputs = []
+        for result in results:
+            detections = []
+            for score, label_idx, box in zip(result["scores"], result["labels"], result["boxes"]):
+                if score > threshold:
+                    label = model_labels[label_idx.item()]
+                    detections.append(
+                        Region(
+                            box=box.tolist(), concepts=[Concept(name=label, value=score.item())]
+                        )
+                    )
+            outputs.append(detections)
+        return outputs
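The detector counterpart is analogous; process_detections consumes per-image dicts of scores/labels/boxes tensors, which is the shape torchvision detection models emit (model choice and decorator usage illustrative):

    from typing import List

    import torch
    from torchvision.models.detection import (
        FasterRCNN_ResNet50_FPN_Weights,
        fasterrcnn_resnet50_fpn,
    )

    from clarifai.runners.models.visual_detector_class import VisualDetectorClass
    from clarifai.runners.utils.data_types import Image, Region

    class MyDetector(VisualDetectorClass):
        def load_model(self):
            weights = FasterRCNN_ResNet50_FPN_Weights.DEFAULT
            self.model = fasterrcnn_resnet50_fpn(weights=weights).eval()
            self.transform = weights.transforms()
            self.labels = dict(enumerate(weights.meta["categories"]))

        @VisualDetectorClass.method
        def predict(self, image: Image, threshold: float = 0.5) -> List[Region]:
            pil = self.preprocess_image(image.bytes)
            with torch.no_grad():
                results = self.model([self.transform(pil)])  # one dict per image
            return self.process_detections(results, threshold, self.labels)[0]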