clarifai 10.0.0__py3-none-any.whl → 10.1.0__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as they appear in their public registry, and is provided for informational purposes only.
- clarifai/client/base.py +8 -1
- clarifai/client/dataset.py +77 -21
- clarifai/client/input.py +6 -6
- clarifai/client/model.py +1 -1
- clarifai/client/module.py +1 -1
- clarifai/client/workflow.py +1 -1
- clarifai/datasets/upload/features.py +3 -0
- clarifai/datasets/upload/image.py +57 -26
- clarifai/datasets/upload/loaders/xview_detection.py +4 -0
- clarifai/datasets/upload/utils.py +23 -7
- clarifai/models/model_serving/README.md +113 -121
- clarifai/models/model_serving/__init__.py +2 -0
- clarifai/models/model_serving/cli/_utils.py +53 -0
- clarifai/models/model_serving/cli/base.py +14 -0
- clarifai/models/model_serving/cli/build.py +79 -0
- clarifai/models/model_serving/cli/clarifai_clis.py +33 -0
- clarifai/models/model_serving/cli/create.py +171 -0
- clarifai/models/model_serving/cli/example_cli.py +34 -0
- clarifai/models/model_serving/cli/login.py +26 -0
- clarifai/models/model_serving/cli/upload.py +182 -0
- clarifai/models/model_serving/constants.py +20 -0
- clarifai/models/model_serving/docs/cli.md +150 -0
- clarifai/models/model_serving/docs/concepts.md +229 -0
- clarifai/models/model_serving/docs/dependencies.md +1 -1
- clarifai/models/model_serving/docs/inference_parameters.md +112 -107
- clarifai/models/model_serving/docs/model_types.md +16 -17
- clarifai/models/model_serving/model_config/__init__.py +4 -2
- clarifai/models/model_serving/model_config/base.py +369 -0
- clarifai/models/model_serving/model_config/config.py +219 -224
- clarifai/models/model_serving/model_config/inference_parameter.py +5 -0
- clarifai/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +25 -24
- clarifai/models/model_serving/model_config/model_types_config/text-classifier.yaml +19 -18
- clarifai/models/model_serving/model_config/model_types_config/text-embedder.yaml +20 -18
- clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml +19 -18
- clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml +19 -18
- clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml +22 -18
- clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml +32 -28
- clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml +19 -18
- clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +19 -18
- clarifai/models/model_serving/{models → model_config}/output.py +8 -0
- clarifai/models/model_serving/model_config/triton/__init__.py +14 -0
- clarifai/models/model_serving/model_config/{serializer.py → triton/serializer.py} +3 -1
- clarifai/models/model_serving/model_config/triton/triton_config.py +182 -0
- clarifai/models/model_serving/{models/model_types.py → model_config/triton/wrappers.py} +4 -4
- clarifai/models/model_serving/{models → repo_build}/__init__.py +2 -0
- clarifai/models/model_serving/repo_build/build.py +198 -0
- clarifai/models/model_serving/repo_build/static_files/_requirements.txt +2 -0
- clarifai/models/model_serving/repo_build/static_files/base_test.py +169 -0
- clarifai/models/model_serving/repo_build/static_files/inference.py +26 -0
- clarifai/models/model_serving/repo_build/static_files/sample_clarifai_config.yaml +25 -0
- clarifai/models/model_serving/repo_build/static_files/test.py +40 -0
- clarifai/models/model_serving/{models/pb_model.py → repo_build/static_files/triton/model.py} +15 -14
- clarifai/models/model_serving/utils.py +21 -0
- clarifai/rag/rag.py +45 -12
- clarifai/rag/utils.py +3 -2
- clarifai/utils/logging.py +7 -0
- clarifai/versions.py +1 -1
- {clarifai-10.0.0.dist-info → clarifai-10.1.0.dist-info}/METADATA +28 -5
- clarifai-10.1.0.dist-info/RECORD +114 -0
- clarifai-10.1.0.dist-info/entry_points.txt +2 -0
- clarifai/models/model_serving/cli/deploy_cli.py +0 -123
- clarifai/models/model_serving/cli/model_zip.py +0 -61
- clarifai/models/model_serving/cli/repository.py +0 -89
- clarifai/models/model_serving/docs/custom_config.md +0 -33
- clarifai/models/model_serving/docs/output.md +0 -28
- clarifai/models/model_serving/models/default_test.py +0 -281
- clarifai/models/model_serving/models/inference.py +0 -50
- clarifai/models/model_serving/models/test.py +0 -64
- clarifai/models/model_serving/pb_model_repository.py +0 -108
- clarifai-10.0.0.dist-info/RECORD +0 -103
- clarifai-10.0.0.dist-info/entry_points.txt +0 -4
- {clarifai-10.0.0.dist-info → clarifai-10.1.0.dist-info}/LICENSE +0 -0
- {clarifai-10.0.0.dist-info → clarifai-10.1.0.dist-info}/WHEEL +0 -0
- {clarifai-10.0.0.dist-info → clarifai-10.1.0.dist-info}/top_level.txt +0 -0
clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml

@@ -1,18 +1,19 @@
- …(18 old lines not captured in this view)
+serving_backend:
+  triton:
+    input:
+    - name: text
+      data_type: TYPE_STRING
+      dims: [1]
+    output:
+    - name: image
+      data_type: TYPE_UINT8
+      dims: [-1, -1, 3]
+      label_filename: null
+clarifai_model:
+  type: text-to-image
+  output_type: ImageOutput
+  field_maps:
+    input_fields_map:
+      text: text
+    output_fields_map:
+      image: image
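In 10.1.0 each model-type config splits into a `serving_backend` section (Triton tensor names, dtypes, dims) and a `clarifai_model` section (platform model type, output dataclass, and field mappings). A minimal sketch of reading the new two-section layout with PyYAML; the local file path is illustrative:

```python
# Hedged sketch: parse a 10.1.0 model-type config and pull out the two new
# top-level sections. Assumes PyYAML is installed; the path is illustrative.
import yaml

with open("text-to-image.yaml") as f:  # hypothetical local copy
    cfg = yaml.safe_load(f)

triton = cfg["serving_backend"]["triton"]  # tensor names, dtypes, dims
clarifai_model = cfg["clarifai_model"]     # type, output_type, field_maps

print(triton["input"][0]["name"])                         # -> "text"
print(clarifai_model["field_maps"]["output_fields_map"])  # -> {"image": "image"}
```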
clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml

@@ -1,18 +1,19 @@
- …(18 old lines not captured in this view)
+serving_backend:
+  triton:
+    input:
+    - name: text
+      data_type: TYPE_STRING
+      dims: [1]
+    output:
+    - name: text
+      data_type: TYPE_STRING
+      dims: [1]
+      label_filename: null
+clarifai_model:
+  type: text-to-text
+  output_type: TextOutput
+  field_maps:
+    input_fields_map:
+      text: text
+    output_fields_map:
+      text: text
clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml

@@ -1,18 +1,22 @@
- …(old lines 1–13 not captured in this view)
-field_maps:
- …(old lines 15–18 not captured in this view)
+serving_backend:
+  triton:
+    input:
+    - name: image
+      data_type: TYPE_UINT8
+      dims: [-1, -1, 3]
+    output:
+    - name: softmax_predictions
+      data_type: TYPE_FP32
+      dims: [-1]
+      label_filename: "labels.txt"
+
+clarifai_model:
+  field_maps:
+    input_fields_map:
+      image: image
+    output_fields_map:
+      concepts: softmax_predictions
+  output_type: ClassifierOutput
+  type: visual-classifier
+  labels:
+  inference_parameters:
clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml

@@ -1,28 +1,32 @@
- …(old lines 1–21 not captured in this view)
-field_maps:
- …(old lines 23–28 not captured in this view)
+serving_backend:
+  triton:
+    input:
+    - name: image
+      data_type: TYPE_UINT8
+      dims: [-1, -1, 3]
+    output:
+    - name: predicted_bboxes
+      data_type: TYPE_FP32
+      dims: [-1, 4]
+      label_filename: null
+    - name: predicted_labels
+      data_type: TYPE_INT32
+      dims: [-1, 1]
+      label_filename: "labels.txt"
+    - name: predicted_scores
+      data_type: TYPE_FP32
+      dims: [-1, 1]
+      label_filename: null
+
+clarifai_model:
+  field_maps:
+    input_fields_map:
+      image: image
+    output_fields_map:
+      "regions[...].region_info.bounding_box": "predicted_bboxes"
+      "regions[...].data.concepts[...].id": "predicted_labels"
+      "regions[...].data.concepts[...].value": "predicted_scores"
+  output_type: VisualDetectorOutput
+  type: visual-detector
+  labels:
+  inference_parameters:
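The three detector outputs share a leading per-detection axis: boxes are `[-1, 4]`, labels and scores `[-1, 1]`. An illustrative sketch (values are made up) of numpy arrays that match those declared dims for two detections:

```python
# Illustrative only: numpy arrays shaped to match the declared Triton dims
# for N detections (here N=2).
import numpy as np

predicted_bboxes = np.array([[0.1, 0.2, 0.5, 0.6],
                             [0.3, 0.3, 0.9, 0.8]], dtype=np.float32)  # (2, 4)
predicted_labels = np.array([[0], [3]], dtype=np.int32)                # (2, 1)
predicted_scores = np.array([[0.91], [0.54]], dtype=np.float32)        # (2, 1)

# All three share the same leading detection count.
assert predicted_bboxes.shape[0] == predicted_labels.shape[0] == predicted_scores.shape[0]
```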
clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml

@@ -1,18 +1,19 @@
- …(18 old lines not captured in this view)
+serving_backend:
+  triton:
+    input:
+    - name: image
+      data_type: TYPE_UINT8
+      dims: [-1, -1, 3]
+    output:
+    - name: embeddings
+      data_type: TYPE_FP32
+      dims: [-1]
+      label_filename: null
+clarifai_model:
+  type: visual-embedder
+  output_type: EmbeddingOutput
+  field_maps:
+    input_fields_map:
+      image: image
+    output_fields_map:
+      embeddings: embeddings
clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml

@@ -1,18 +1,19 @@
- …(18 old lines not captured in this view)
+serving_backend:
+  triton:
+    input:
+    - name: image
+      data_type: TYPE_UINT8
+      dims: [-1, -1, 3]
+    output:
+    - name: predicted_mask
+      data_type: TYPE_INT64
+      dims: [-1, -1]
+      label_filename: "labels.txt"
+clarifai_model:
+  type: visual-segmenter
+  output_type: MasksOutput
+  field_maps:
+    input_fields_map:
+      image: image
+    output_fields_map:
+      "regions[...].region_info.mask,regions[...].data.concepts": "predicted_mask"
clarifai/models/model_serving/{models → model_config}/output.py

@@ -28,6 +28,10 @@ class VisualDetectorOutput:
     """
     Validate input upon initialization.
     """
+    assert isinstance(self.predicted_bboxes, np.ndarray), "`predicted_bboxes` must be numpy array"
+    assert isinstance(self.predicted_labels, np.ndarray), "`predicted_labels` must be numpy array"
+    assert isinstance(self.predicted_scores, np.ndarray), "`predicted_scores` must be numpy array"
+
     assert self.predicted_bboxes.ndim == self.predicted_labels.ndim == \
       self.predicted_scores.ndim==2, f"All predictions must be 2-dimensional, \
       Got bbox-dims: {self.predicted_bboxes.ndim}, label-dims: {self.predicted_labels.ndim}, \
@@ -58,6 +62,7 @@ class ClassifierOutput:
     """
     Validate input upon initialization.
     """
+    assert isinstance(self.predicted_scores, np.ndarray), "`predicted_scores` must be numpy array"
     assert self.predicted_scores.ndim == 1, \
       f"All predictions must be 1-dimensional, Got scores-dims: {self.predicted_scores.ndim} instead."
 
@@ -89,6 +94,7 @@ class EmbeddingOutput:
     """
     Validate input upon initialization.
     """
+    assert isinstance(self.embedding_vector, np.ndarray), "`embedding_vector` must be numpy array"
     assert self.embedding_vector.ndim == 1, \
       f"Embeddings must be 1-dimensional, Got embedding-dims: {self.embedding_vector.ndim} instead."
 
@@ -104,6 +110,7 @@ class MasksOutput:
     """
     Validate input upon initialization.
     """
+    assert isinstance(self.predicted_mask, np.ndarray), "`predicted_mask` must be numpy array"
     assert self.predicted_mask.ndim == 2, \
       f"predicted_mask must be 2-dimensional, Got mask dims: {self.predicted_mask.ndim} instead."
 
@@ -119,6 +126,7 @@ class ImageOutput:
     """
     Validate input upon initialization.
     """
+    assert isinstance(self.image, np.ndarray), "`image` must be numpy array"
     assert self.image.ndim == 3, \
       f"Generated image must be 3-dimensional, Got image-dims: {self.image.ndim} instead."
     assert self.image.shape[2] == 3, \
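The new `isinstance` asserts reject plain Python lists up front instead of failing later inside Triton with an obscure error. A hedged sketch of constructing the validated dataclasses, assuming the field names match the asserts above (the concrete constructor signatures live in output.py):

```python
# Sketch under the assumption that the dataclass fields match the asserts
# above; import path follows the models/ -> model_config/ move in this diff.
import numpy as np
from clarifai.models.model_serving.model_config.output import (
    ClassifierOutput, EmbeddingOutput)

ok = ClassifierOutput(predicted_scores=np.array([0.7, 0.2, 0.1], dtype=np.float32))
emb = EmbeddingOutput(embedding_vector=np.zeros(512, dtype=np.float32))

# A plain Python list now fails fast with a clear message:
try:
    ClassifierOutput(predicted_scores=[0.7, 0.2, 0.1])
except AssertionError as e:
    print(e)  # `predicted_scores` must be numpy array
```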
clarifai/models/model_serving/model_config/triton/__init__.py

@@ -0,0 +1,14 @@
+# Copyright 2023 Clarifai, Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from .serializer import Serializer  # noqa # pylint: disable=unused-import
+from .triton_config import *  # noqa # pylint: disable=unused-import
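With these re-exports, the serializer and the config classes resolve at the `triton` package level as well as from their own modules, so both import forms below should work:

```python
# Direct module import, and the package-level re-export added above.
from clarifai.models.model_serving.model_config.triton.serializer import Serializer
from clarifai.models.model_serving.model_config.triton import Serializer, TritonModelConfig
```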
clarifai/models/model_serving/model_config/{serializer.py → triton/serializer.py}

@@ -21,7 +21,7 @@ from typing import Type
 from google.protobuf.text_format import MessageToString
 from tritonclient.grpc import model_config_pb2
 
-from .
+from .triton_config import TritonModelConfig
 
 
 class Serializer:
@@ -61,6 +61,8 @@ class Serializer:
     output_config = self.config_proto.output.add()
     for key, value in out_field.__dict__.items():
       try:
+        if not value:
+          continue
         setattr(output_config, key, value)
       except AttributeError:  #Proto Repeated Field assignment not allowed
         field = getattr(output_config, key)
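The `if not value: continue` guard skips falsy dataclass fields, so an empty `label_filename` (or `None` from a YAML `label_filename: null`) is never written into the protobuf. A standalone mimic of the guarded copy loop, with no protobuf involved:

```python
# Standalone mimic of the guarded copy loop above; shows which OutputConfig
# fields survive when label_filename is left empty.
from dataclasses import dataclass, field
from typing import List

@dataclass
class OutputConfig:
    name: str
    data_type: int
    dims: List = field(default_factory=list)
    label_filename: str = ""

out_field = OutputConfig(name="embeddings", data_type=11, dims=[-1])
copied = {}
for key, value in out_field.__dict__.items():
    if not value:  # "", [], None, and 0 are all skipped
        continue
    copied[key] = value

print(copied)  # {'name': 'embeddings', 'data_type': 11, 'dims': [-1]}
```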
clarifai/models/model_serving/model_config/triton/triton_config.py

@@ -0,0 +1,182 @@
+# Copyright 2023 Clarifai, Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Model Config classes."""
+from __future__ import annotations  # isort: skip
+
+from copy import deepcopy
+from dataclasses import dataclass, field
+from typing import Any, List, Union
+
+from ...constants import IMAGE_TENSOR_NAME, MAX_HW_DIM
+
+
+### Triton Model Config classes.###
+@dataclass
+class DType:
+  """
+  Triton Model Config data types.
+  """
+  # https://github.com/triton-inference-server/common/blob/main/protobuf/model_config.proto
+  TYPE_UINT8: int = 2
+  TYPE_INT8: int = 6
+  TYPE_INT16: int = 7
+  TYPE_INT32: int = 8
+  TYPE_INT64: int = 9
+  TYPE_FP16: int = 10
+  TYPE_FP32: int = 11
+  TYPE_STRING: int = 13
+  KIND_GPU: int = 1
+  KIND_CPU: int = 2
+
+
+@dataclass
+class InputConfig:
+  """
+  Triton Input definition.
+  Params:
+  -------
+  name: input name
+  data_type: input data type
+  dims: Pre-defined input data shape(s).
+
+  Returns:
+  --------
+  InputConfig
+  """
+  name: str
+  data_type: int
+  dims: List = field(default_factory=list)
+  optional: bool = False
+
+
+@dataclass
+class OutputConfig:
+  """
+  Triton Output definition.
+  Params:
+  -------
+  name: output name
+  data_type: output data type
+  dims: Pre-defined output data shape(s).
+  labels (bool): If labels file is required for inference.
+
+  Returns:
+  --------
+  OutputConfig
+  """
+  name: str
+  data_type: int
+  dims: List = field(default_factory=list)
+  label_filename: str = ""
+
+
+@dataclass
+class Device:
+  """
+  Triton instance_group.
+  Define the type of inference device and number of devices to use.
+  Params:
+  -------
+  count: number of devices
+  use_gpu: whether to use cpu or gpu.
+
+  Returns:
+  --------
+  Device object
+  """
+  count: int = 1
+  use_gpu: bool = True
+
+  def __post_init__(self):
+    if self.use_gpu:
+      self.kind: str = DType.KIND_GPU
+    else:
+      self.kind: str = DType.KIND_CPU
+
+
+@dataclass
+class DynamicBatching:
+  """
+  Triton dynamic_batching config.
+  Params:
+  -------
+  preferred_batch_size: batch size
+  max_queue_delay_microseconds: max queue delay for a request batch
+
+  Returns:
+  --------
+  DynamicBatching object
+  """
+  #preferred_batch_size: List[int] = [1] # recommended not to set
+  max_queue_delay_microseconds: int = 500
+
+
+@dataclass
+class TritonModelConfig:
+  """
+  Triton Model Config base.
+  Params:
+  -------
+  name: triton inference model name
+  input: a list of an InputConfig field
+  output: a list of OutputConfig fields/dicts
+  instance_group: Device. see Device
+  dynamic_batching: Triton dynamic batching settings.
+  max_batch_size: max request batch size
+  backend: Triton Python Backend. Constant
+  image_shape: List of Height and Width of input image. *
+
+  (*): This attribute won't be serialized in config.pbtxt
+
+  Returns:
+  --------
+  TritonModelConfig
+  """
+  #model_type: str
+  model_name: str = ""
+  model_version: str = "1"
+  input: List[InputConfig] = field(default_factory=list)
+  output: List[OutputConfig] = field(default_factory=list)
+  instance_group: Device = field(default_factory=Device)
+  dynamic_batching: DynamicBatching = field(default_factory=DynamicBatching)
+  max_batch_size: int = 1
+  backend: str = "python"
+  image_shape: tuple[Union[int, float], Union[int, float]] = field(
+      default_factory=lambda: [-1, -1])  #(H, W)
+
+  def __setattr__(self, __name: str, __value: Any) -> None:
+    if __name == "image_shape":
+      __value = self._check_and_assign_image_shape_value(__value)
+
+    super().__setattr__(__name, __value)
+
+  def _check_and_assign_image_shape_value(self, value):
+    _has_image = False
+    for each in self.input:
+      if IMAGE_TENSOR_NAME in each.name:
+        _has_image = True
+        if len(value) != 2:
+          raise ValueError(
+              f"image_shape takes 2 values, Height and Width. Got {len(value)} values instead.")
+        if value[0] > MAX_HW_DIM or value[1] > MAX_HW_DIM:
+          raise ValueError(
+              f"H and W each have a maximum value of {MAX_HW_DIM}. Got H: {value[0]}, W: {value[1]}"
+          )
+        image_dims = deepcopy(value)
+        image_dims.append(3)  # add channel dim
+        each.dims = image_dims
+
+    if not _has_image and self.input:
+      return [-1, -1]
+    else:
+      return value
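Assigning `image_shape` on a `TritonModelConfig` routes through `__setattr__`, which validates the two values and rewrites the dims of any input whose name contains `IMAGE_TENSOR_NAME` to `[H, W, 3]`. A sketch assuming `IMAGE_TENSOR_NAME == "image"` and `MAX_HW_DIM >= 224` (both live in `constants.py`, not shown in this diff):

```python
# Sketch assuming IMAGE_TENSOR_NAME == "image" and MAX_HW_DIM >= 224
# (both come from clarifai/models/model_serving/constants.py, not shown here).
from clarifai.models.model_serving.model_config.triton import (
    DType, InputConfig, OutputConfig, TritonModelConfig)

cfg = TritonModelConfig(
    model_name="my-visual-classifier",  # hypothetical name
    input=[InputConfig(name="image", data_type=DType.TYPE_UINT8, dims=[-1, -1, 3])],
    output=[OutputConfig(name="softmax_predictions", data_type=DType.TYPE_FP32,
                         dims=[-1], label_filename="labels.txt")],
)

cfg.image_shape = [224, 224]  # triggers _check_and_assign_image_shape_value
print(cfg.input[0].dims)      # -> [224, 224, 3]

# cfg.image_shape = [224]         # would raise: needs exactly H and W
# cfg.image_shape = [99999, 224]  # would raise if a value exceeds MAX_HW_DIM
```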
clarifai/models/model_serving/{models/model_types.py → model_config/triton/wrappers.py}

@@ -41,7 +41,7 @@ def visual_detector(func: Callable):
     out_scores = []
 
     # input_data passed as list of images
-    preds = func(self, input_data[:], *args, **kwargs)
+    preds = func(self, list(input_data[:]), *args, **kwargs)
     for pred in preds:
       out_bboxes.append(pred.predicted_bboxes)
       out_labels.append(pred.predicted_labels)
@@ -79,7 +79,7 @@ def visual_classifier(func: Callable):
     """
     out_scores = []
     # input_data passed as list of images
-    preds = func(self, input_data[:], *args, **kwargs)
+    preds = func(self, list(input_data[:]), *args, **kwargs)
 
     for pred in preds:
       out_scores.append(pred.predicted_scores)
@@ -185,7 +185,7 @@ def visual_embedder(func: Callable):
     """
     out_embeddings = []
    # input_data passed as list of images
-    preds = func(self, input_data[:], *args, **kwargs)
+    preds = func(self, list(input_data[:]), *args, **kwargs)
 
     for pred in preds:
       out_embeddings.append(pred.embedding_vector)
@@ -210,7 +210,7 @@ def visual_segmenter(func: Callable):
     """
     masks = []
     # input_data passed as list of images
-    preds = func(self, input_data[:], *args, **kwargs)
+    preds = func(self, list(input_data[:]), *args, **kwargs)
 
     for pred in preds:
       masks.append(pred.predicted_mask)
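The change matters when the wrapper receives a batched numpy array: `input_data[:]` is still a single `ndarray`, while `list(input_data[:])` is a true Python list of per-image arrays, matching the "list of images" contract in the comment. A quick illustration:

```python
# Why list(input_data[:]) differs from input_data[:] for a numpy batch.
import numpy as np

batch = np.zeros((2, 4, 4, 3), dtype=np.uint8)  # 2 images, HxWxC

sliced = batch[:]         # still one ndarray, shape (2, 4, 4, 3)
as_list = list(batch[:])  # a Python list of 2 arrays, each (4, 4, 3)

print(type(sliced), sliced.shape)
print(type(as_list), len(as_list), as_list[0].shape)
```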
clarifai/models/model_serving/{models → repo_build}/__init__.py

@@ -10,3 +10,5 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from .build import RepositoryBuilder  # noqa
+from .static_files.base_test import BaseTest  # noqa