clarifai 10.8.3.tar.gz → 10.8.5.tar.gz
This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
- {clarifai-10.8.3/clarifai.egg-info → clarifai-10.8.5}/PKG-INFO +2 -1
- clarifai-10.8.5/clarifai/__init__.py +1 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/client/dataset.py +9 -3
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/constants/dataset.py +1 -1
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/datasets/upload/base.py +6 -3
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/datasets/upload/features.py +10 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/datasets/upload/image.py +22 -13
- clarifai-10.8.5/clarifai/datasets/upload/multimodal.py +70 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/datasets/upload/text.py +8 -5
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/runners/utils/data_handler.py +31 -44
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/runners/utils/loader.py +6 -5
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/utils/misc.py +6 -0
- {clarifai-10.8.3 → clarifai-10.8.5/clarifai.egg-info}/PKG-INFO +2 -1
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai.egg-info/SOURCES.txt +1 -44
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai.egg-info/requires.txt +1 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/requirements.txt +1 -0
- clarifai-10.8.3/clarifai/__init__.py +0 -1
- clarifai-10.8.3/clarifai/models/model_serving/README.md +0 -158
- clarifai-10.8.3/clarifai/models/model_serving/__init__.py +0 -14
- clarifai-10.8.3/clarifai/models/model_serving/cli/__init__.py +0 -12
- clarifai-10.8.3/clarifai/models/model_serving/cli/_utils.py +0 -53
- clarifai-10.8.3/clarifai/models/model_serving/cli/base.py +0 -14
- clarifai-10.8.3/clarifai/models/model_serving/cli/build.py +0 -79
- clarifai-10.8.3/clarifai/models/model_serving/cli/clarifai_clis.py +0 -33
- clarifai-10.8.3/clarifai/models/model_serving/cli/create.py +0 -171
- clarifai-10.8.3/clarifai/models/model_serving/cli/example_cli.py +0 -34
- clarifai-10.8.3/clarifai/models/model_serving/cli/login.py +0 -26
- clarifai-10.8.3/clarifai/models/model_serving/cli/upload.py +0 -183
- clarifai-10.8.3/clarifai/models/model_serving/constants.py +0 -21
- clarifai-10.8.3/clarifai/models/model_serving/docs/cli.md +0 -161
- clarifai-10.8.3/clarifai/models/model_serving/docs/concepts.md +0 -229
- clarifai-10.8.3/clarifai/models/model_serving/docs/dependencies.md +0 -11
- clarifai-10.8.3/clarifai/models/model_serving/docs/inference_parameters.md +0 -139
- clarifai-10.8.3/clarifai/models/model_serving/docs/model_types.md +0 -19
- clarifai-10.8.3/clarifai/models/model_serving/model_config/__init__.py +0 -16
- clarifai-10.8.3/clarifai/models/model_serving/model_config/base.py +0 -369
- clarifai-10.8.3/clarifai/models/model_serving/model_config/config.py +0 -312
- clarifai-10.8.3/clarifai/models/model_serving/model_config/inference_parameter.py +0 -129
- clarifai-10.8.3/clarifai/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +0 -25
- clarifai-10.8.3/clarifai/models/model_serving/model_config/model_types_config/text-classifier.yaml +0 -19
- clarifai-10.8.3/clarifai/models/model_serving/model_config/model_types_config/text-embedder.yaml +0 -20
- clarifai-10.8.3/clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml +0 -19
- clarifai-10.8.3/clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml +0 -19
- clarifai-10.8.3/clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml +0 -22
- clarifai-10.8.3/clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml +0 -32
- clarifai-10.8.3/clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml +0 -19
- clarifai-10.8.3/clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +0 -19
- clarifai-10.8.3/clarifai/models/model_serving/model_config/output.py +0 -133
- clarifai-10.8.3/clarifai/models/model_serving/model_config/triton/__init__.py +0 -14
- clarifai-10.8.3/clarifai/models/model_serving/model_config/triton/serializer.py +0 -136
- clarifai-10.8.3/clarifai/models/model_serving/model_config/triton/triton_config.py +0 -182
- clarifai-10.8.3/clarifai/models/model_serving/model_config/triton/wrappers.py +0 -281
- clarifai-10.8.3/clarifai/models/model_serving/repo_build/__init__.py +0 -14
- clarifai-10.8.3/clarifai/models/model_serving/repo_build/build.py +0 -198
- clarifai-10.8.3/clarifai/models/model_serving/repo_build/static_files/_requirements.txt +0 -2
- clarifai-10.8.3/clarifai/models/model_serving/repo_build/static_files/base_test.py +0 -169
- clarifai-10.8.3/clarifai/models/model_serving/repo_build/static_files/inference.py +0 -26
- clarifai-10.8.3/clarifai/models/model_serving/repo_build/static_files/sample_clarifai_config.yaml +0 -25
- clarifai-10.8.3/clarifai/models/model_serving/repo_build/static_files/test.py +0 -40
- clarifai-10.8.3/clarifai/models/model_serving/repo_build/static_files/triton/model.py +0 -75
- clarifai-10.8.3/clarifai/models/model_serving/utils.py +0 -31
- {clarifai-10.8.3 → clarifai-10.8.5}/LICENSE +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/MANIFEST.in +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/README.md +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/cli.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/client/__init__.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/client/app.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/client/auth/__init__.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/client/auth/helper.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/client/auth/register.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/client/auth/stub.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/client/base.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/client/input.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/client/lister.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/client/model.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/client/module.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/client/search.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/client/user.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/client/workflow.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/constants/input.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/constants/model.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/constants/rag.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/constants/search.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/constants/workflow.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/datasets/__init__.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/datasets/export/__init__.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/datasets/export/inputs_annotations.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/datasets/upload/__init__.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/datasets/upload/loaders/README.md +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/datasets/upload/loaders/__init__.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/datasets/upload/loaders/coco_captions.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/datasets/upload/loaders/coco_detection.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/datasets/upload/loaders/imagenet_classification.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/datasets/upload/loaders/xview_detection.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/datasets/upload/utils.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/errors.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/models/__init__.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/models/api.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/modules/README.md +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/modules/__init__.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/modules/css.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/modules/pages.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/modules/style.css +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/rag/__init__.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/rag/rag.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/rag/utils.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/runners/__init__.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/runners/dockerfile_template/Dockerfile.cpu.template +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/runners/dockerfile_template/Dockerfile.cuda.template +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/runners/models/__init__.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/runners/models/base_typed_model.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/runners/models/model_class.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/runners/models/model_runner.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/runners/models/model_servicer.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/runners/models/model_upload.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/runners/server.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/runners/utils/__init__.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/runners/utils/data_utils.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/runners/utils/logging.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/runners/utils/url_fetcher.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/schema/search.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/urls/helper.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/utils/__init__.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/utils/constants.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/utils/evaluation/__init__.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/utils/evaluation/helpers.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/utils/evaluation/main.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/utils/evaluation/testset_annotation_parser.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/utils/logging.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/utils/model_train.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/versions.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/workflows/__init__.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/workflows/export.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/workflows/utils.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai/workflows/validate.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai.egg-info/dependency_links.txt +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai.egg-info/entry_points.txt +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/clarifai.egg-info/top_level.txt +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/pyproject.toml +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/setup.cfg +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/setup.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/tests/test_app.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/tests/test_auth.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/tests/test_data_upload.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/tests/test_eval.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/tests/test_misc.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/tests/test_model_predict.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/tests/test_model_train.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/tests/test_modules.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/tests/test_rag.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/tests/test_search.py +0 -0
- {clarifai-10.8.3 → clarifai-10.8.5}/tests/test_stub.py +0 -0
{clarifai-10.8.3/clarifai.egg-info → clarifai-10.8.5}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: clarifai
-Version: 10.8.3
+Version: 10.8.5
 Summary: Clarifai Python SDK
 Home-page: https://github.com/Clarifai/clarifai-python
 Author: Clarifai
@@ -32,6 +32,7 @@ Requires-Dist: Pillow>=9.5.0
 Requires-Dist: inquirerpy==0.3.4
 Requires-Dist: tabulate>=0.9.0
 Requires-Dist: protobuf==5.27.3
+Requires-Dist: fsspec==2024.6.1
 Provides-Extra: all
 Requires-Dist: pycocotools==2.0.6; extra == "all"

clarifai-10.8.5/clarifai/__init__.py

@@ -0,0 +1 @@
+__version__ = "10.8.5"
{clarifai-10.8.3 → clarifai-10.8.5}/clarifai/client/dataset.py

@@ -25,6 +25,7 @@ from clarifai.datasets.export.inputs_annotations import (DatasetExportReader,
 from clarifai.datasets.upload.base import ClarifaiDataLoader
 from clarifai.datasets.upload.image import (VisualClassificationDataset, VisualDetectionDataset,
                                             VisualSegmentationDataset)
+from clarifai.datasets.upload.multimodal import MultiModalDataset
 from clarifai.datasets.upload.text import TextClassificationDataset
 from clarifai.datasets.upload.utils import DisplayUploadStatus
 from clarifai.errors import UserError
@@ -352,14 +353,15 @@ class Dataset(Lister, BaseClient):
       if input_details:
         failed_input_details = [
             index, failed_id, input_details.status.details,
-            dataset_obj.data_generator[index]
+            getattr(dataset_obj.data_generator[index], 'image_path', None) or
+            getattr(dataset_obj.data_generator[index], 'text', None),
            dataset_obj.data_generator[index].labels, dataset_obj.data_generator[index].metadata
         ]
         failed_inputs_logs.append(failed_input_details)

     failed_table = tabulate(
         failed_inputs_logs,
-        headers=["Index", "Input ID", "Status", "
+        headers=["Index", "Input ID", "Status", "Input", "Labels", "Metadata"],
         tablefmt="grid")
     timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
     self.logger.warning(
@@ -422,7 +424,8 @@ class Dataset(Lister, BaseClient):
     if self.task not in DATASET_UPLOAD_TASKS:
       raise UserError("Task should be one of \
                       'text_classification', 'visual_classification', \
-                      'visual_detection', 'visual_segmentation', 'visual_captioning'"
+                      'visual_detection', 'visual_segmentation', 'visual_captioning', 'multimodal_dataset'"
+                     )

     if self.task == "text_classification":
       dataset_obj = TextClassificationDataset(dataloader, self.id)
@@ -433,6 +436,9 @@ class Dataset(Lister, BaseClient):
     elif self.task == "visual_segmentation":
       dataset_obj = VisualSegmentationDataset(dataloader, self.id)

+    elif self.task == "multimodal_dataset":
+      dataset_obj = MultiModalDataset(dataloader, self.id)
+
     else:  # visual_classification & visual_captioning
       dataset_obj = VisualClassificationDataset(dataloader, self.id)

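The changes above register a new 'multimodal_dataset' task in Dataset.upload_dataset and make the failed-inputs table report whichever of image_path or text a data item carries. A minimal usage sketch follows, assuming valid credentials; the app/dataset IDs and PAT are placeholders, and CsvMultiModalLoader is the hypothetical loader sketched after the multimodal.py diff below.

    from clarifai.client.app import App

    # Placeholder IDs and PAT; App and app.dataset are existing SDK entry
    # points, but the values here are assumptions for illustration only.
    app = App(app_id="my-app", user_id="my-user", pat="MY_PAT")
    dataset = app.dataset(dataset_id="my-multimodal-dataset")

    # upload_dataset reads dataloader.task; "multimodal_dataset" now routes
    # to the new MultiModalDataset proto builder shown in the diff.
    dataset.upload_dataset(dataloader=CsvMultiModalLoader("pairs.csv"))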
{clarifai-10.8.3 → clarifai-10.8.5}/clarifai/datasets/upload/base.py

@@ -4,21 +4,24 @@ from typing import Iterator, List, Tuple, TypeVar, Union
 from clarifai_grpc.grpc.api import resources_pb2

 from clarifai.constants.dataset import DATASET_UPLOAD_TASKS
-from clarifai.datasets.upload.features import (
+from clarifai.datasets.upload.features import (MultiModalFeatures, TextFeatures,
+                                               VisualClassificationFeatures,
                                                VisualDetectionFeatures, VisualSegmentationFeatures)

 OutputFeaturesType = TypeVar(
     'OutputFeaturesType',
     bound=Union[TextFeatures, VisualClassificationFeatures, VisualDetectionFeatures,
-                VisualSegmentationFeatures])
+                VisualSegmentationFeatures, MultiModalFeatures])


 class ClarifaiDataset:
   """Clarifai datasets base class."""

-  def __init__(self, data_generator: 'ClarifaiDataLoader', dataset_id: str
+  def __init__(self, data_generator: 'ClarifaiDataLoader', dataset_id: str,
+               max_workers: int = 4) -> None:
     self.data_generator = data_generator
     self.dataset_id = dataset_id
+    self.max_workers = max_workers
     self.all_input_ids = {}
     self._all_input_protos = {}
     self._all_annotation_protos = defaultdict(list)
{clarifai-10.8.3 → clarifai-10.8.5}/clarifai/datasets/upload/features.py

@@ -49,3 +49,13 @@ class VisualSegmentationFeatures:
   metadata: Optional[dict] = None
   image_bytes: Optional[bytes] = None
   label_ids: Optional[List[str]] = None
+
+
+@dataclass
+class MultiModalFeatures:
+  """Multi-modal datasets preprocessing output features."""
+  text: str
+  image_bytes: str
+  labels: List[Union[str, int]] = None  # List[str or int] to cater for multi-class tasks
+  id: Optional[int] = None  # image_id
+  metadata: Optional[dict] = None
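A short sketch of constructing the new MultiModalFeatures records by hand. The field names come from the dataclass above; the caption, labels, and the local file name are made-up sample values.

    from clarifai.datasets.upload.features import MultiModalFeatures

    # Text-only item: image_bytes stays empty, so the uploader builds a text input.
    caption_item = MultiModalFeatures(
        text="A red bicycle leaning against a brick wall.",
        image_bytes=None,
        labels=["bicycle"],
        id=None,                        # None -> the uploader mints an ID with get_uuid(8)
        metadata={"source": "example"},
    )

    # Image item: image_bytes populated, so the uploader builds an image input.
    with open("photo.jpg", "rb") as f:  # assumed local file
      photo_item = MultiModalFeatures(
          text=None,
          image_bytes=f.read(),
          labels=["bicycle"],
      )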
{clarifai-10.8.3 → clarifai-10.8.5}/clarifai/datasets/upload/image.py

@@ -1,5 +1,4 @@
 import os
-import uuid
 from concurrent.futures import ThreadPoolExecutor
 from typing import List, Tuple, Type

@@ -8,12 +7,16 @@ from google.protobuf.struct_pb2 import Struct

 from clarifai.client.input import Inputs
 from clarifai.datasets.upload.base import ClarifaiDataLoader, ClarifaiDataset
+from clarifai.utils.misc import get_uuid


 class VisualClassificationDataset(ClarifaiDataset):

-  def __init__(self,
-
+  def __init__(self,
+               data_generator: Type[ClarifaiDataLoader],
+               dataset_id: str,
+               max_workers: int = 4) -> None:
+    super().__init__(data_generator, dataset_id, max_workers)

   def _extract_protos(self, batch_input_ids: List[str]
                      ) -> Tuple[List[resources_pb2.Input], List[resources_pb2.Annotation]]:
@@ -33,7 +36,7 @@ class VisualClassificationDataset(ClarifaiDataset):
       labels = data_item.labels if isinstance(data_item.labels,
                                               list) else [data_item.labels]  # clarifai concept
       label_ids = data_item.label_ids
-      input_id = f"{self.dataset_id}-{
+      input_id = f"{self.dataset_id}-{get_uuid(8)}" if data_item.id is None else f"{self.dataset_id}-{str(data_item.id)}"
       geo_info = data_item.geo_info
       if data_item.metadata is not None:
         metadata.update(data_item.metadata)
@@ -64,7 +67,7 @@ class VisualClassificationDataset(ClarifaiDataset):
              geo_info=geo_info,
              metadata=metadata))

-    with ThreadPoolExecutor(max_workers=
+    with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
      futures = [executor.submit(process_data_item, id) for id in batch_input_ids]
      for job in futures:
        job.result()
@@ -75,8 +78,11 @@ class VisualClassificationDataset(ClarifaiDataset):
 class VisualDetectionDataset(ClarifaiDataset):
   """Visual detection dataset proto class."""

-  def __init__(self,
-
+  def __init__(self,
+               data_generator: Type[ClarifaiDataLoader],
+               dataset_id: str,
+               max_workers: int = 4) -> None:
+    super().__init__(data_generator, dataset_id, max_workers)

   def _extract_protos(self, batch_input_ids: List[int]
                      ) -> Tuple[List[resources_pb2.Input], List[resources_pb2.Annotation]]:
@@ -101,7 +107,7 @@ class VisualDetectionDataset(ClarifaiDataset):
       else:
         label_ids = None
       bboxes = data_item.bboxes  # [[xmin,ymin,xmax,ymax],...,[xmin,ymin,xmax,ymax]]
-      input_id = f"{self.dataset_id}-{
+      input_id = f"{self.dataset_id}-{get_uuid(8)}" if data_item.id is None else f"{self.dataset_id}-{str(data_item.id)}"
       if data_item.metadata is not None:
         metadata.update(data_item.metadata)
       else:
@@ -135,7 +141,7 @@ class VisualDetectionDataset(ClarifaiDataset):
              bbox=bboxes[i],
              label_id=label_ids[i] if label_ids else None))

-    with ThreadPoolExecutor(max_workers=
+    with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
      futures = [executor.submit(process_data_item, id) for id in batch_input_ids]
      for job in futures:
        job.result()
@@ -146,8 +152,11 @@ class VisualDetectionDataset(ClarifaiDataset):
 class VisualSegmentationDataset(ClarifaiDataset):
   """Visual segmentation dataset proto class."""

-  def __init__(self,
-
+  def __init__(self,
+               data_generator: Type[ClarifaiDataLoader],
+               dataset_id: str,
+               max_workers: int = 4) -> None:
+    super().__init__(data_generator, dataset_id, max_workers)

   def _extract_protos(self, batch_input_ids: List[str]
                      ) -> Tuple[List[resources_pb2.Input], List[resources_pb2.Annotation]]:
@@ -172,7 +181,7 @@ class VisualSegmentationDataset(ClarifaiDataset):
       else:
         label_ids = None
       _polygons = data_item.polygons  # list of polygons: [[[x,y],...,[x,y]],...]
-      input_id = f"{self.dataset_id}-{
+      input_id = f"{self.dataset_id}-{get_uuid(8)}" if data_item.id is None else f"{self.dataset_id}-{str(data_item.id)}"
       if data_item.metadata is not None:
         metadata.update(data_item.metadata)
       else:
@@ -210,7 +219,7 @@ class VisualSegmentationDataset(ClarifaiDataset):
         except IndexError:
           continue

-    with ThreadPoolExecutor(max_workers=
+    with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
      futures = [executor.submit(process_data_item, id) for id in batch_input_ids]
      for job in futures:
        job.result()
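The dataset proto classes above now accept a max_workers argument and pass it to the ThreadPoolExecutor in _extract_protos instead of a fixed pool size. A minimal sketch, assuming the standard ClarifaiDataLoader interface (task property, __len__, __getitem__); the toy loader, IDs, and worker count are illustrative only.

    from clarifai.datasets.upload.base import ClarifaiDataLoader
    from clarifai.datasets.upload.features import VisualClassificationFeatures
    from clarifai.datasets.upload.image import VisualClassificationDataset


    class OneImageLoader(ClarifaiDataLoader):
      """Toy loader with a single hard-coded item (illustrative only)."""

      @property
      def task(self):
        return "visual_classification"

      def __len__(self):
        return 1

      def __getitem__(self, index):
        return VisualClassificationFeatures(image_path="photo.jpg", labels=["bicycle"])


    # max_workers now controls the thread pool used while building protos.
    dataset_obj = VisualClassificationDataset(
        data_generator=OneImageLoader(), dataset_id="my-dataset", max_workers=8)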
clarifai-10.8.5/clarifai/datasets/upload/multimodal.py

@@ -0,0 +1,70 @@
+from concurrent.futures import ThreadPoolExecutor
+from typing import List, Tuple, Type
+
+from clarifai_grpc.grpc.api import resources_pb2
+from google.protobuf.struct_pb2 import Struct
+
+from clarifai.client.input import Inputs
+from clarifai.datasets.upload.base import ClarifaiDataLoader, ClarifaiDataset
+from clarifai.utils.misc import get_uuid
+
+
+class MultiModalDataset(ClarifaiDataset):
+
+  def __init__(self,
+               data_generator: Type[ClarifaiDataLoader],
+               dataset_id: str,
+               max_workers: int = 4) -> None:
+    super().__init__(data_generator, dataset_id, max_workers)
+
+  def _extract_protos(
+      self,
+      batch_input_ids: List[str],
+  ) -> Tuple[List[resources_pb2.Input]]:
+    """ Creats Multimodal (image and text) input protos for batch of input ids.
+    Args:
+      batch_input_ids: List of input IDs to retrieve the protos for.
+    Returns:
+      input_protos: List of input protos.
+
+    """
+    input_protos, annotation_protos = [], []
+
+    def process_data_item(id):
+      data_item = self.data_generator[id]
+      metadata = Struct()
+      image_bytes = data_item.image_bytes
+      text = data_item.text
+      labels = data_item.labels if isinstance(data_item.labels, list) else [data_item.labels]
+      id = get_uuid(8)
+      input_id = f"{self.dataset_id}-{id}" if data_item.id is None else f"{self.dataset_id}-{str(data_item.id)}"
+      if data_item.metadata is not None:
+        metadata.update(data_item.metadata)
+      else:
+        metadata = None
+
+      self.all_input_ids[id] = input_id
+      if data_item.image_bytes is not None:
+        input_protos.append(
+            Inputs.get_input_from_bytes(
+                input_id=input_id,
+                image_bytes=image_bytes,
+                dataset_id=self.dataset_id,
+                labels=labels,
+                metadata=metadata))
+      else:
+        input_protos.append(
+            Inputs.get_text_input(
+                input_id=input_id,
+                raw_text=text,
+                dataset_id=self.dataset_id,
+                labels=labels,
+                metadata=metadata))
+
+    with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
+      futures = [executor.submit(process_data_item, id) for id in batch_input_ids]
+
+      for job in futures:
+        job.result()
+
+    return input_protos, annotation_protos
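MultiModalDataset expects each item from its data generator to be a MultiModalFeatures with either image_bytes or text populated, since _extract_protos branches on image_bytes. The loader below is a hypothetical example of that shape (the class name, CSV layout, and column names are invented); it is the CsvMultiModalLoader referenced in the upload sketch earlier.

    import csv

    from clarifai.datasets.upload.base import ClarifaiDataLoader
    from clarifai.datasets.upload.features import MultiModalFeatures


    class CsvMultiModalLoader(ClarifaiDataLoader):
      """Hypothetical loader reading (text, image_path, label) rows from a CSV file."""

      def __init__(self, csv_path: str):
        with open(csv_path, newline="") as f:
          self.rows = list(csv.DictReader(f))

      @property
      def task(self):
        # Matches the new branch in Dataset.upload_dataset.
        return "multimodal_dataset"

      def __len__(self):
        return len(self.rows)

      def __getitem__(self, index):
        row = self.rows[index]
        image_bytes = None
        if row.get("image_path"):
          with open(row["image_path"], "rb") as img:
            image_bytes = img.read()
        return MultiModalFeatures(
            text=row.get("text") or None,
            image_bytes=image_bytes,
            labels=[row["label"]],
        )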
{clarifai-10.8.3 → clarifai-10.8.5}/clarifai/datasets/upload/text.py

@@ -1,4 +1,3 @@
-import uuid
 from concurrent.futures import ThreadPoolExecutor
 from typing import List, Tuple, Type

@@ -6,6 +5,7 @@ from clarifai_grpc.grpc.api import resources_pb2
 from google.protobuf.struct_pb2 import Struct

 from clarifai.client.input import Inputs
+from clarifai.utils.misc import get_uuid

 from .base import ClarifaiDataLoader, ClarifaiDataset

@@ -13,8 +13,11 @@ from .base import ClarifaiDataLoader, ClarifaiDataset
 class TextClassificationDataset(ClarifaiDataset):
   """Upload text classification datasets to clarifai datasets"""

-  def __init__(self,
-
+  def __init__(self,
+               data_generator: Type[ClarifaiDataLoader],
+               dataset_id: str,
+               max_workers: int = 4) -> None:
+    super().__init__(data_generator, dataset_id, max_workers)

   def _extract_protos(self, batch_input_ids: List[int]
                      ) -> Tuple[List[resources_pb2.Input], List[resources_pb2.Annotation]]:
@@ -34,7 +37,7 @@ class TextClassificationDataset(ClarifaiDataset):
       labels = data_item.labels if isinstance(data_item.labels,
                                               list) else [data_item.labels]  # clarifai concept
       label_ids = data_item.label_ids
-      input_id = f"{self.dataset_id}-{
+      input_id = f"{self.dataset_id}-{get_uuid(8)}" if data_item.id is None else f"{self.dataset_id}-{str(data_item.id)}"
       if data_item.metadata is not None:
         metadata.update(data_item.metadata)

@@ -48,7 +51,7 @@ class TextClassificationDataset(ClarifaiDataset):
              label_ids=label_ids,
              metadata=metadata))

-    with ThreadPoolExecutor(max_workers=
+    with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
      futures = [executor.submit(process_data_item, id) for id in batch_input_ids]
      for job in futures:
        job.result()
{clarifai-10.8.3 → clarifai-10.8.5}/clarifai/runners/utils/data_handler.py

@@ -4,18 +4,19 @@ import numpy as np
 from clarifai_grpc.grpc.api import resources_pb2
 from clarifai_grpc.grpc.api.status import status_code_pb2, status_pb2
 from PIL import Image
-from pydantic import BaseModel, ConfigDict, PrivateAttr, computed_field

 from clarifai.client.auth.helper import ClarifaiAuthHelper

 from .data_utils import bytes_to_image, image_to_bytes


-class BaseDataHandler(BaseModel):
-  _proto: Union[resources_pb2.Input, resources_pb2.Output]
-  _auth: ClarifaiAuthHelper = PrivateAttr(default=None)
+class BaseDataHandler:

-
+  def __init__(self,
+               proto: Union[resources_pb2.Input, resources_pb2.Output],
+               auth: ClarifaiAuthHelper = None):
+    self._proto = proto
+    self._auth = auth

   #
   def to_python(self):
@@ -27,11 +28,8 @@ class BaseDataHandler(BaseModel):
   def proto(self):
     return self._proto

-
-
-
-  # status
-  @computed_field
+  # Status
+  @property
   def status(self) -> status_pb2.Status:
     return self._proto.status

@@ -40,7 +38,7 @@ class BaseDataHandler(BaseModel):
     self._proto.status.description = description

   # Text
-  @
+  @property
   def text(self) -> Union[None, str]:
     data = self._proto.data.text
     text = None
@@ -48,16 +46,14 @@ class BaseDataHandler(BaseModel):
     if data.raw:
       text = data.raw
     else:
-      # url = data.url
       raise NotImplementedError
-
     return text

   def set_text(self, text: str):
     self._proto.data.text.raw = text

   # Image
-  @
+  @property
   def image(self, format: str = "np") -> Union[None, Image.Image, np.ndarray]:
     data = self._proto.data.image
     image = None
@@ -66,13 +62,9 @@ class BaseDataHandler(BaseModel):
     if data.base64:
       image = data.base64
     elif data.url:
-      # download url
-      # url = data.url
-      image = ...
       raise NotImplementedError
     image = bytes_to_image(image)
     image = image if not format == "np" else np.asarray(image).astype("uint8")
-
     return image

   def set_image(self, image: Union[Image.Image, np.ndarray]):
@@ -81,21 +73,20 @@ class BaseDataHandler(BaseModel):
     self._proto.data.image.base64 = image_to_bytes(image)

   # Audio
-  @
+  @property
   def audio(self) -> bytes:
     data = self._proto.data.audio
     audio = None
     if data.ByteSize():
       if data.base64:
         audio = data.base64
-
     return audio

   def set_audio(self, audio: bytes):
     self._proto.data.audio.base64 = audio

   # Bboxes
-  @
+  @property
   def bboxes(self, real_coord: bool = False, image_width: int = None,
              image_height: int = None) -> Tuple[List, List, List]:
     if real_coord:
@@ -123,15 +114,13 @@ class BaseDataHandler(BaseModel):

     return xyxy, scores, concepts

-  def set_bboxes(
-
-
-
-
-
-
-      image_height: int = None,
-  ):
+  def set_bboxes(self,
+                 boxes: list,
+                 scores: list,
+                 concepts: list,
+                 real_coord: bool = False,
+                 image_width: int = None,
+                 image_height: int = None):
     if real_coord:
       assert (image_height and
               image_width), "image_height and image_width are required when `real_coord` is set"
@@ -159,7 +148,7 @@ class BaseDataHandler(BaseModel):
     self._proto.data.regions = regions

   # Concepts
-  @
+  @property
   def concepts(self) -> Dict[str, float]:
     con_scores = {}
     for each in self.proto.data.concepts:
@@ -177,7 +166,7 @@ class BaseDataHandler(BaseModel):
     self._proto.data.concepts.append(each)

   # Embeddings
-  @
+  @property
   def embeddings(self) -> List[List[float]]:
     return [each.vector for each in self.proto.data.embeddings]

@@ -193,8 +182,7 @@ class BaseDataHandler(BaseModel):
   # Constructors
   @classmethod
   def from_proto(cls, proto):
-    clss = cls()
-    clss.set_proto(proto)
+    clss = cls(proto=proto)
     return clss

   @classmethod
@@ -209,7 +197,7 @@ class BaseDataHandler(BaseModel):
       concepts: Dict[str, float] = {},
       embeddings: List[List[float]] = [],
   ) -> 'OutputDataHandler':
-    clss = cls()
+    clss = cls(proto=resources_pb2.Output())
     if isinstance(image, Image.Image) or isinstance(image, np.ndarray):
       clss.set_image(image)
     if text:
@@ -224,21 +212,20 @@ class BaseDataHandler(BaseModel):
       clss.set_embeddings(embeddings)

     clss.set_status(code=status_code, description=status_description)
-
     return clss


 class InputDataHandler(BaseDataHandler):
-  _proto: resources_pb2.Input = resources_pb2.Input()

-  def
-
-
+  def __init__(self,
+               proto: resources_pb2.Input = resources_pb2.Input(),
+               auth: ClarifaiAuthHelper = None):
+    super().__init__(proto=proto, auth=auth)


 class OutputDataHandler(BaseDataHandler):
-  _proto: resources_pb2.Output = resources_pb2.Output()

-  def
-
-
+  def __init__(self,
+               proto: resources_pb2.Output = resources_pb2.Output(),
+               auth: ClarifaiAuthHelper = None):
+    super().__init__(proto=proto, auth=auth)
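With pydantic removed, the handlers above wrap a proto passed to __init__ and expose their accessors as plain properties. A small sketch of how that reads in practice, assuming the accessors shown in the diff; the sample text and status description are arbitrary.

    from clarifai_grpc.grpc.api import resources_pb2
    from clarifai_grpc.grpc.api.status import status_code_pb2

    from clarifai.runners.utils.data_handler import InputDataHandler, OutputDataHandler

    # from_proto now simply forwards the proto to the constructor (cls(proto=proto)).
    inp = InputDataHandler.from_proto(resources_pb2.Input())
    inp.set_text("hello runner")
    print(inp.text)  # "hello runner", read through the new @property accessor

    out = OutputDataHandler(proto=resources_pb2.Output())
    out.set_status(code=status_code_pb2.SUCCESS, description="ok")
    print(out.status.code)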
{clarifai-10.8.3 → clarifai-10.8.5}/clarifai/runners/utils/loader.py

@@ -28,7 +28,7 @@ class HuggingFaceLoarder:
       raise ImportError(
           "The 'huggingface_hub' package is not installed. Please install it using 'pip install huggingface_hub'."
       )
-    if os.path.exists(checkpoint_path):
+    if os.path.exists(checkpoint_path) and self.validate_download(checkpoint_path):
       print("Checkpoints already exist")
     else:
       os.makedirs(checkpoint_path, exist_ok=True)
@@ -41,10 +41,11 @@ class HuggingFaceLoarder:
       except Exception as e:
         print("Error downloading model checkpoints ", e)
         return False
-
-
-
-
+      finally:
+        is_downloaded = self.validate_download(checkpoint_path)
+        if not is_downloaded:
+          print("Error downloading model checkpoints")
+          return False
     return True

   def validate_hf_model(self,):
{clarifai-10.8.3 → clarifai-10.8.5}/clarifai/utils/misc.py

@@ -1,4 +1,5 @@
 import os
+import uuid
 from typing import Any, Dict, List

 from clarifai.errors import UserError
@@ -69,3 +70,8 @@ def concept_relations_accumulation(relations_dict: Dict[str, Any], subject_conce
     relations_dict[object_concept] = []
     relations_dict[subject_concept] = []
   return relations_dict
+
+
+def get_uuid(val: int) -> str:
+  """Generates a UUID."""
+  return uuid.uuid4().hex[:val]
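The new get_uuid helper returns the first val characters of a random hex UUID; the upload classes use it to mint input IDs when a data item has no ID of its own. A quick sketch with a placeholder dataset ID:

    from clarifai.utils.misc import get_uuid

    dataset_id = "my-dataset"            # placeholder value
    suffix = get_uuid(8)                 # e.g. "3f9c2b7a" (random 8-char hex)
    input_id = f"{dataset_id}-{suffix}"  # mirrors the f-strings in image.py / text.py / multimodal.py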
{clarifai-10.8.3 → clarifai-10.8.5/clarifai.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: clarifai
-Version: 10.8.3
+Version: 10.8.5
 Summary: Clarifai Python SDK
 Home-page: https://github.com/Clarifai/clarifai-python
 Author: Clarifai
@@ -32,6 +32,7 @@ Requires-Dist: Pillow>=9.5.0
 Requires-Dist: inquirerpy==0.3.4
 Requires-Dist: tabulate>=0.9.0
 Requires-Dist: protobuf==5.27.3
+Requires-Dist: fsspec==2024.6.1
 Provides-Extra: all
 Requires-Dist: pycocotools==2.0.6; extra == "all"

{clarifai-10.8.3 → clarifai-10.8.5}/clarifai.egg-info/SOURCES.txt

@@ -42,6 +42,7 @@ clarifai/datasets/upload/__init__.py
 clarifai/datasets/upload/base.py
 clarifai/datasets/upload/features.py
 clarifai/datasets/upload/image.py
+clarifai/datasets/upload/multimodal.py
 clarifai/datasets/upload/text.py
 clarifai/datasets/upload/utils.py
 clarifai/datasets/upload/loaders/README.md
@@ -52,50 +53,6 @@ clarifai/datasets/upload/loaders/imagenet_classification.py
 clarifai/datasets/upload/loaders/xview_detection.py
 clarifai/models/__init__.py
 clarifai/models/api.py
-clarifai/models/model_serving/README.md
-clarifai/models/model_serving/__init__.py
-clarifai/models/model_serving/constants.py
-clarifai/models/model_serving/utils.py
-clarifai/models/model_serving/cli/__init__.py
-clarifai/models/model_serving/cli/_utils.py
-clarifai/models/model_serving/cli/base.py
-clarifai/models/model_serving/cli/build.py
-clarifai/models/model_serving/cli/clarifai_clis.py
-clarifai/models/model_serving/cli/create.py
-clarifai/models/model_serving/cli/example_cli.py
-clarifai/models/model_serving/cli/login.py
-clarifai/models/model_serving/cli/upload.py
-clarifai/models/model_serving/docs/cli.md
-clarifai/models/model_serving/docs/concepts.md
-clarifai/models/model_serving/docs/dependencies.md
-clarifai/models/model_serving/docs/inference_parameters.md
-clarifai/models/model_serving/docs/model_types.md
-clarifai/models/model_serving/model_config/__init__.py
-clarifai/models/model_serving/model_config/base.py
-clarifai/models/model_serving/model_config/config.py
-clarifai/models/model_serving/model_config/inference_parameter.py
-clarifai/models/model_serving/model_config/output.py
-clarifai/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml
-clarifai/models/model_serving/model_config/model_types_config/text-classifier.yaml
-clarifai/models/model_serving/model_config/model_types_config/text-embedder.yaml
-clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml
-clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml
-clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml
-clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml
-clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml
-clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml
-clarifai/models/model_serving/model_config/triton/__init__.py
-clarifai/models/model_serving/model_config/triton/serializer.py
-clarifai/models/model_serving/model_config/triton/triton_config.py
-clarifai/models/model_serving/model_config/triton/wrappers.py
-clarifai/models/model_serving/repo_build/__init__.py
-clarifai/models/model_serving/repo_build/build.py
-clarifai/models/model_serving/repo_build/static_files/_requirements.txt
-clarifai/models/model_serving/repo_build/static_files/base_test.py
-clarifai/models/model_serving/repo_build/static_files/inference.py
-clarifai/models/model_serving/repo_build/static_files/sample_clarifai_config.yaml
-clarifai/models/model_serving/repo_build/static_files/test.py
-clarifai/models/model_serving/repo_build/static_files/triton/model.py
 clarifai/modules/README.md
 clarifai/modules/__init__.py
 clarifai/modules/css.py
clarifai-10.8.3/clarifai/__init__.py

@@ -1 +0,0 @@
-__version__ = "10.8.3"