clarifai 10.0.1__py3-none-any.whl → 10.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. clarifai/client/app.py +23 -43
  2. clarifai/client/base.py +46 -4
  3. clarifai/client/dataset.py +85 -33
  4. clarifai/client/input.py +35 -7
  5. clarifai/client/model.py +192 -11
  6. clarifai/client/module.py +8 -6
  7. clarifai/client/runner.py +3 -1
  8. clarifai/client/search.py +6 -3
  9. clarifai/client/user.py +14 -12
  10. clarifai/client/workflow.py +8 -5
  11. clarifai/datasets/upload/features.py +3 -0
  12. clarifai/datasets/upload/image.py +57 -26
  13. clarifai/datasets/upload/loaders/README.md +3 -4
  14. clarifai/datasets/upload/loaders/xview_detection.py +9 -5
  15. clarifai/datasets/upload/utils.py +23 -7
  16. clarifai/models/model_serving/README.md +113 -121
  17. clarifai/models/model_serving/__init__.py +2 -0
  18. clarifai/models/model_serving/cli/_utils.py +53 -0
  19. clarifai/models/model_serving/cli/base.py +14 -0
  20. clarifai/models/model_serving/cli/build.py +79 -0
  21. clarifai/models/model_serving/cli/clarifai_clis.py +33 -0
  22. clarifai/models/model_serving/cli/create.py +171 -0
  23. clarifai/models/model_serving/cli/example_cli.py +34 -0
  24. clarifai/models/model_serving/cli/login.py +26 -0
  25. clarifai/models/model_serving/cli/upload.py +182 -0
  26. clarifai/models/model_serving/constants.py +20 -0
  27. clarifai/models/model_serving/docs/cli.md +150 -0
  28. clarifai/models/model_serving/docs/concepts.md +229 -0
  29. clarifai/models/model_serving/docs/dependencies.md +1 -1
  30. clarifai/models/model_serving/docs/inference_parameters.md +112 -107
  31. clarifai/models/model_serving/docs/model_types.md +16 -17
  32. clarifai/models/model_serving/model_config/__init__.py +4 -2
  33. clarifai/models/model_serving/model_config/base.py +369 -0
  34. clarifai/models/model_serving/model_config/config.py +219 -224
  35. clarifai/models/model_serving/model_config/inference_parameter.py +5 -0
  36. clarifai/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +25 -24
  37. clarifai/models/model_serving/model_config/model_types_config/text-classifier.yaml +19 -18
  38. clarifai/models/model_serving/model_config/model_types_config/text-embedder.yaml +20 -18
  39. clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml +19 -18
  40. clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml +19 -18
  41. clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml +22 -18
  42. clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml +32 -28
  43. clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml +19 -18
  44. clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +19 -18
  45. clarifai/models/model_serving/{models → model_config}/output.py +8 -0
  46. clarifai/models/model_serving/model_config/triton/__init__.py +14 -0
  47. clarifai/models/model_serving/model_config/{serializer.py → triton/serializer.py} +3 -1
  48. clarifai/models/model_serving/model_config/triton/triton_config.py +182 -0
  49. clarifai/models/model_serving/{models/model_types.py → model_config/triton/wrappers.py} +4 -4
  50. clarifai/models/model_serving/{models → repo_build}/__init__.py +2 -0
  51. clarifai/models/model_serving/repo_build/build.py +198 -0
  52. clarifai/models/model_serving/repo_build/static_files/_requirements.txt +2 -0
  53. clarifai/models/model_serving/repo_build/static_files/base_test.py +169 -0
  54. clarifai/models/model_serving/repo_build/static_files/inference.py +26 -0
  55. clarifai/models/model_serving/repo_build/static_files/sample_clarifai_config.yaml +25 -0
  56. clarifai/models/model_serving/repo_build/static_files/test.py +40 -0
  57. clarifai/models/model_serving/{models/pb_model.py → repo_build/static_files/triton/model.py} +15 -14
  58. clarifai/models/model_serving/utils.py +21 -0
  59. clarifai/rag/rag.py +67 -23
  60. clarifai/rag/utils.py +21 -5
  61. clarifai/utils/evaluation/__init__.py +427 -0
  62. clarifai/utils/evaluation/helpers.py +522 -0
  63. clarifai/utils/logging.py +7 -0
  64. clarifai/utils/model_train.py +3 -1
  65. clarifai/versions.py +1 -1
  66. {clarifai-10.0.1.dist-info → clarifai-10.1.1.dist-info}/METADATA +58 -10
  67. clarifai-10.1.1.dist-info/RECORD +115 -0
  68. clarifai-10.1.1.dist-info/entry_points.txt +2 -0
  69. clarifai/datasets/upload/loaders/coco_segmentation.py +0 -98
  70. clarifai/models/model_serving/cli/deploy_cli.py +0 -123
  71. clarifai/models/model_serving/cli/model_zip.py +0 -61
  72. clarifai/models/model_serving/cli/repository.py +0 -89
  73. clarifai/models/model_serving/docs/custom_config.md +0 -33
  74. clarifai/models/model_serving/docs/output.md +0 -28
  75. clarifai/models/model_serving/models/default_test.py +0 -281
  76. clarifai/models/model_serving/models/inference.py +0 -50
  77. clarifai/models/model_serving/models/test.py +0 -64
  78. clarifai/models/model_serving/pb_model_repository.py +0 -108
  79. clarifai-10.0.1.dist-info/RECORD +0 -103
  80. clarifai-10.0.1.dist-info/entry_points.txt +0 -4
  81. {clarifai-10.0.1.dist-info → clarifai-10.1.1.dist-info}/LICENSE +0 -0
  82. {clarifai-10.0.1.dist-info → clarifai-10.1.1.dist-info}/WHEEL +0 -0
  83. {clarifai-10.0.1.dist-info → clarifai-10.1.1.dist-info}/top_level.txt +0 -0

clarifai/models/model_serving/models/default_test.py (deleted)
@@ -1,281 +0,0 @@
- import dataclasses
- import inspect
- import logging
- import os
- import unittest
- from typing import Any, Dict, Union
-
- import numpy as np
-
- from ..model_config import ModelTypes
- from ..model_config.config import get_model_config
- from ..model_config.inference_parameter import InferParamManager
- from .output import (ClassifierOutput, EmbeddingOutput, ImageOutput, MasksOutput, TextOutput,
-                      VisualDetectorOutput)
-
- PREDEFINED_TEXTS = ["Photo of a cat", "A cat is playing around"]
-
- PREDEFINED_IMAGES = [
-     np.zeros((100, 100, 3), dtype='uint8'),  #black
-     np.ones((100, 100, 3), dtype='uint8') * 255,  #white
-     np.random.uniform(0, 255, (100, 100, 3)).astype('uint8')  #noise
- ]
-
-
- class DefaultTestInferenceModel(unittest.TestCase):
-   """
-   This file contains test cases:
-   * Test triton config of current model vs default config
-   * Test if labels.txt is valid for specific model types
-   * Test inference with simple inputs
-   ...
-   """
-   __test__ = False
-
-   def triton_get_predictions(self, input_data, **kwargs):
-     """Call InferenceModel.get_predictions method
-
-     Args:
-         input_data (Union[np.ndarray, str]):
-             if model receives image or vector then type is `np.ndarray`, otherwise `string`
-
-     Returns:
-         One of types in models.output
-     """
-     _kwargs = self.inference_parameters.validate(**kwargs)
-     return inspect.unwrap(self.triton_python_model.inference_obj.get_predictions)(
-         self.triton_python_model.inference_obj, input_data, **_kwargs)
-
-   def _get_preprocess(self, input):
-     """ preprocess if input is image """
-     if "image" in input.name:
-       h, w, _ = input.dims
-       if h > -1 and w > -1:
-         import cv2
-
-         def _f(x):
-           logging.info(f"Preprocess reshape image => {(w, h, 3)}")
-           return cv2.resize(x, (w, h))
-
-         return _f
-
-     return lambda x: x
-
-   def intitialize(self,
-                   model_type: str,
-                   repo_version_dir: str,
-                   is_instance_kind_gpu: bool = True,
-                   inference_parameters: Union[str, Dict[str, Any]] = ""):
-     import sys
-     #
-     if 'inference' in sys.modules:
-       del sys.modules['inference']
-     sys.path.append(repo_version_dir)
-     self.model_type = model_type
-     self.is_instance_kind_gpu = is_instance_kind_gpu
-     logging.info(self.model_type)
-
-     # load inference parameters
-     if isinstance(inference_parameters, str):
-       self.inference_parameters = InferParamManager(json_path=inference_parameters)
-     else:
-       self.inference_parameters = InferParamManager.from_kwargs(**inference_parameters)
-       exported_file_path = os.path.join(repo_version_dir, "inference_parameters.json")
-       logging.info(f"Export inference parameters to `{exported_file_path}` when loading from dict")
-       self.inference_parameters.export(exported_file_path)
-
-     # Construct TritonPythonModel object
-     from model import TritonPythonModel
-     self.triton_python_model = TritonPythonModel()
-     self.triton_python_model.initialize(
-         dict(
-             model_repository=os.path.join(repo_version_dir, ".."),
-             model_instance_kind="GPU" if self.is_instance_kind_gpu else "cpu"))
-     # Get default config of model and model_type
-     self.default_triton_model_config = get_model_config(self.model_type).make_triton_model_config(
-         model_name=self.model_type, model_version="1", image_shape=[-1, -1])
-     # Get current model config
-     self.triton_model_config = self.triton_python_model.config_msg
-     self.input_name_to_config = {each.name: each
-                                  for each in self.triton_model_config.input} # name: input
-     self.preprocess = {
-         k: self._get_preprocess(input)
-         for k, input in self.input_name_to_config.items()
-     }
-     # load labels
-     self._required_label_model_types = [
-         ModelTypes.visual_detector, ModelTypes.visual_classifier, ModelTypes.text_classifier,
-         ModelTypes.visual_segmenter
-     ]
-     self._output_text_models = [ModelTypes.text_to_text]
-     self.labels = []
-     if self.model_type in self._required_label_model_types:
-       with open(os.path.join(repo_version_dir, "../labels.txt"), 'r') as fp:
-         labels = fp.readlines()
-         if labels:
-           self.labels = [line for line in labels if line]
-
-   def test_triton_config(self):
-     """ test Triton config"""
-     # check if input names are still matched
-     default_input_names = [each.name for each in self.default_triton_model_config.input]
-     current_input_names = [each.name for each in self.triton_model_config.input]
-     default_input_names.sort()
-     current_input_names.sort()
-     self.assertEqual(current_input_names, default_input_names,
-                      "input name of current model vs generated model must be matched "
-                      f"{current_input_names} != {default_input_names}")
-     # check if output names are still matched
-     default_output_names = [each.name for each in self.default_triton_model_config.output]
-     current_output_names = [each.name for each in self.triton_model_config.output]
-     default_output_names.sort()
-     current_output_names.sort()
-     self.assertEqual(current_output_names, default_output_names,
-                      "output name of current model vs generated model must be matched "
-                      f"{current_output_names} not in {default_output_names}")
-
-   def test_having_labels(self):
-     if self.model_type in self._required_label_model_types:
-       self.assertTrue(
-           len(self.labels),
-           f"`labels.txt` is empty!. Model type `{self.model_type}` requires input labels in `labels.txt`"
-       )
-
-   def test_inference_with_predefined_inputs(self):
-     """ Test Inference with predefined inputs """
-
-     def _is_valid_logit(x: np.array):
-       return np.all(0 <= x) and np.all(x <= 1)
-
-     def _is_non_negative(x: np.array):
-       return np.all(x >= 0)
-
-     def _is_integer(x):
-       return np.all(np.equal(np.mod(x, 1), 0))
-
-     if len(self.input_name_to_config) == 1:
-       if "image" in self.preprocess:
-         inputs = [self.preprocess["image"](inp) for inp in PREDEFINED_IMAGES]
-       else:
-         inputs = PREDEFINED_TEXTS
-       outputs = self.triton_get_predictions(inputs)
-
-       # Test for specific model type:
-       # 1. length of output array vs config
-       # 2. type of outputs
-       # 3. test range value, shape and dtype of output
-
-       for inp, output in zip(inputs, outputs):
-
-         field = dataclasses.fields(output)[0].name
-         if self.model_type not in self._output_text_models:
-           self.assertEqual(
-               len(self.triton_model_config.output[0].dims),
-               len(getattr(output, field).shape),
-               "Length of 'dims' of config and output must be matched, but get "
-               f"Config {len(self.triton_model_config.output[0].dims)} != Output {len(getattr(output, field).shape)}"
-           )
-
-         if self.model_type == ModelTypes.visual_detector:
-           logging.info(output.predicted_labels)
-           self.assertEqual(
-               type(output), VisualDetectorOutput,
-               f"Output type must be `VisualDetectorOutput`, but got {type(output)}")
-           self.assertTrue(
-               _is_valid_logit(output.predicted_scores),
-               "`predicted_scores` must be in range [0, 1]")
-           self.assertTrue(
-               _is_non_negative(output.predicted_bboxes), "`predicted_bboxes` must be >= 0")
-           self.assertTrue(
-               np.all(0 <= output.predicted_labels) and
-               np.all(output.predicted_labels < len(self.labels)),
-               f"`predicted_labels` must be in [0, {len(self.labels) - 1}]")
-           self.assertTrue(
-               _is_integer(output.predicted_labels), "`predicted_labels` must be integer")
-
-         elif self.model_type == ModelTypes.visual_classifier:
-           self.assertEqual(
-               type(output), ClassifierOutput,
-               f"Output type must be `ClassifierOutput`, but got {type(output)}")
-           self.assertTrue(
-               _is_valid_logit(output.predicted_scores),
-               "`predicted_scores` must be in range [0, 1]")
-           if self.labels:
-             self.assertEqual(
-                 len(output.predicted_scores),
-                 len(self.labels),
-                 f"`predicted_labels` must equal to {len(self.labels)}, however got {len(output.predicted_scores)}"
-             )
-
-         elif self.model_type == ModelTypes.text_classifier:
-           self.assertEqual(
-               type(output), ClassifierOutput,
-               f"Output type must be `ClassifierOutput`, but got {type(output)}")
-           self.assertTrue(
-               _is_valid_logit(output.predicted_scores),
-               "`predicted_scores` must be in range [0, 1]")
-           if self.labels:
-             self.assertEqual(
-                 len(output.predicted_scores),
-                 len(self.labels),
-                 f"`predicted_labels` must equal to {len(self.labels)}, however got {len(output.predicted_scores)}"
-             )
-
-         elif self.model_type == ModelTypes.text_embedder:
-           self.assertEqual(
-               type(output), EmbeddingOutput,
-               f"Output type must be `EmbeddingOutput`, but got {type(output)}")
-           self.assertNotEqual(output.embedding_vector.shape, [])
-
-         elif self.model_type == ModelTypes.text_to_text:
-           self.assertEqual(
-               type(output), TextOutput,
-               f"Output type must be `TextOutput`, but got {type(output)}")
-
-         elif self.model_type == ModelTypes.text_to_image:
-           self.assertEqual(
-               type(output), ImageOutput,
-               f"Output type must be `ImageOutput`, but got {type(output)}")
-           self.assertTrue(_is_non_negative(output.image), "`image` elements must be >= 0")
-
-         elif self.model_type == ModelTypes.visual_embedder:
-           self.assertEqual(
-               type(output), EmbeddingOutput,
-               f"Output type must be `EmbeddingOutput`, but got {type(output)}")
-           self.assertNotEqual(output.embedding_vector.shape, [])
-
-         elif self.model_type == ModelTypes.visual_segmenter:
-           self.assertEqual(
-               type(output), MasksOutput,
-               f"Output type must be `MasksOutput`, but got {type(output)}")
-           self.assertTrue(_is_integer(output.predicted_mask), "`predicted_mask` must be integer")
-           if self.labels:
-             self.assertTrue(
-                 np.all(0 <= output.predicted_mask) and
-                 np.all(output.predicted_mask < len(self.labels)),
-                 f"`predicted_mask` must be in [0, {len(self.labels) - 1}]")
-
-     elif len(self.input_name_to_config) == 2:
-       from itertools import zip_longest
-       if self.model_type == ModelTypes.multimodal_embedder:
-         input_images = [self.preprocess["image"](inp) for inp in PREDEFINED_IMAGES]
-         input_texts = PREDEFINED_TEXTS
-
-         def _assert(input_data):
-           batch_inputs = []
-           for group in zip_longest(*input_data.values()):
-             _input = dict(zip(input_data, group))
-             batch_inputs.append(_input)
-           outputs = self.triton_get_predictions(input_data=batch_inputs)
-           for output in outputs:
-             self.assertEqual(
-                 type(output), EmbeddingOutput,
-                 f"Output type must be `EmbeddingOutput`, but got {type(output)}")
-             self.assertNotEqual(output.embedding_vector.shape, [])
-
-         _assert(dict(image=input_images, text=[]))
-         _assert(dict(image=[], text=input_texts))
-
-
- if __name__ == '__main__':
-   unittest.main()

clarifai/models/model_serving/models/inference.py (deleted)
@@ -1,50 +0,0 @@
- # This file contains boilerplate code to allow users write their model
- # inference code that will then interact with the Triton Inference Server
- # Python backend to serve end user requests.
- # The module name, module path, class name & get_predictions() method names MUST be maintained as is
- # but other methods may be added within the class as deemed fit provided
- # they are invoked within the main get_predictions() inference method
- # if they play a role in any step of model inference
- """User model inference script."""
-
- import os
- from pathlib import Path
-
- from clarifai.models.model_serving.model_config import ( # noqa # pylint: disable=unused-import
-     ModelTypes, get_model_config)
-
- config = get_model_config("MODEL_TYPE_PLACEHOLDER")
-
-
- class InferenceModel:
-   """User model inference class."""
-
-   def __init__(self) -> None:
-     """
-     Load inference time artifacts that are called frequently .e.g. models, tokenizers, etc.
-     in this method so they are loaded only once for faster inference.
-     """
-     self.base_path: Path = os.path.dirname(__file__)
-     ## sample model loading code:
-     #self.checkpoint_path: Path = os.path.join(self.base_path, "your checkpoint filename/path")
-     #self.model: Callable = <load_your_model_here from checkpoint or folder>
-
-   @config.inference.wrap_func
-   def get_predictions(self, input_data: list, **kwargs) -> list:
-     """
-     Main model inference method.
-
-     Args:
-     -----
-       input_data: A list of input data item to predict on.
-         Input data can be an image or text, etc depending on the model type.
-
-       **kwargs: your inference parameters.
-
-     Returns:
-     --------
-       List of one of the `clarifai.models.model_serving.models.output types` or `config.inference.return_type(your_output)`. Refer to the README/docs
-     """
-
-     # Delete/Comment out line below and add your inference code
-     raise NotImplementedError()
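
Note: the template above (apparently superseded in 10.1.1 by repo_build/static_files/inference.py) spells out the contract user code had to satisfy in 10.0.x: keep the module path, class name and get_predictions() signature, load artifacts once in __init__, and return one output object per input item. The sketch below is purely illustrative of how such a template was commonly filled in; the text2text checkpoint path, the transformers dependency, the "text-to-text" type string and the TextOutput field name are assumptions, not content taken from either wheel.

import os
from pathlib import Path

from clarifai.models.model_serving.model_config import get_model_config
from clarifai.models.model_serving.models.output import TextOutput

config = get_model_config("text-to-text")  # assumed model type string


class InferenceModel:
  """Hypothetical user implementation of the removed 10.0.x template."""

  def __init__(self) -> None:
    # Load heavy artifacts once at start-up so every request reuses them.
    self.base_path: Path = os.path.dirname(__file__)
    from transformers import pipeline  # assumed third-party dependency
    self.pipeline = pipeline(
        "text2text-generation",
        model=os.path.join(self.base_path, "checkpoint"))  # hypothetical checkpoint path

  @config.inference.wrap_func
  def get_predictions(self, input_data: list, **kwargs) -> list:
    # Return one output object per input text, as the wrapper expects;
    # the `predicted_text` field name is an assumption about TextOutput.
    return [
        TextOutput(predicted_text=self.pipeline(text)[0]["generated_text"])
        for text in input_data
    ]
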

clarifai/models/model_serving/models/test.py (deleted)
@@ -1,64 +0,0 @@
- import logging
- import os
- import unittest
-
- from clarifai.models.model_serving.models.default_test import DefaultTestInferenceModel
-
-
- class CustomTestInferenceModel(DefaultTestInferenceModel):
-   """
-   Run this file to test your implementation of InferenceModel in inference.py with default tests of Triton configuration and its output values based on basic predefined inputs
-   If you want to write custom testcase or just test output value.
-   Please follow these instrucitons:
-   1. Name your test function with prefix "test" so that pytest can execute
-   2. In order to obtain output of InferenceModel, call `self.triton_get_predictions(input_data)`.
-   3. If your input is `image` and you have set custom size of it when building model repository,
-   call `self.preprocess(image)` to obtain correct resized input
-   4. Run this test by calling
-   ```bash
-   pytest ./your_triton_folder/1/test.py
-   #to see std output
-   pytest --log-cli-level=INFO -s ./your_triton_folder/1/test.py
-   ```
-
-   ### Examples:
-   + test text-to-image output
-   ```
-   def test_text_to_image_output(self):
-     text = "Test text"
-     output = self.triton_get_predictions(text)
-     image = output.image # uint8 np.ndarray image
-     #show or save
-   ```
-   + test visual-classifier output
-   ```
-   def test_visual_classifier(self):
-     image = cv2.imread("your/local/image.jpg") # Keep in mind of format of image (BGR or RGB)
-     output = self.triton_get_predictions(image)
-     scores = output.predicted_scores # np.ndarray
-     #process scores to get class id and its score
-     logger.info(result)
-   """
-
-   # Insert your inference parameters json path here
-   # or insert a dictionary of your_parameter_name and value, e.g dict(x=1.5, y="text", c=True)
-   # or Leave it as "" if you don't have it.
-   inference_parameters = ""
-
-   ########### Initialization. Do not change it ###########
-   __test__ = True
-
-   def setUp(self) -> None:
-     logging.info("Initializing...")
-     model_type = "MODEL_TYPE_PLACEHOLDER" # your model type
-     self.intitialize(
-         model_type,
-         repo_version_dir=os.path.dirname(__file__),
-         is_instance_kind_gpu=True,
-         inference_parameters=self.inference_parameters)
-
-   ########################################################
-
-
- if __name__ == '__main__':
-   unittest.main()

clarifai/models/model_serving/pb_model_repository.py (deleted)
@@ -1,108 +0,0 @@
- # Copyright 2023 Clarifai, Inc.
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """
- Triton python backend inference model controller.
- """
-
- import inspect
- import logging
- import os
- from pathlib import Path
- from typing import Type
-
- logging.getLogger("clarifai.models.model_serving.model_config.config").setLevel(logging.ERROR)
-
- from .model_config import Serializer, TritonModelConfig # noqa: E402
- from .models import inference, pb_model, test # noqa: E402
-
-
- class TritonModelRepository:
-   """
-   Triton Python BE Model Repository Generator.
-   """
-
-   def __init__(self, model_config: Type[TritonModelConfig]):
-     self.model_config = model_config
-     self.config_proto = Serializer(model_config)
-
-   def _module_to_file(self, module, file_path: str, func: callable = None):
-     """
-     Write Python Module to file.
-
-     Args:
-     -----
-       module: Python module to write to file
-       file_path: Path of file to write module code into.
-       func: A function to process code of module. It contains only 1 argument, text of module. If it is None, then only save text to `file_path`
-     Returns:
-     --------
-       None
-     """
-     source_code = inspect.getsource(module)
-     with open(file_path, "w") as fp:
-       # change model type
-       if func:
-         source_code = func(source_code)
-       # write it to file
-       fp.write(source_code)
-
-   def build_repository(self, repository_dir: Path = os.curdir):
-     """
-     Generate Triton Model Repository.
-
-     Args:
-     -----
-       repository_dir: Directory to create triton model repository
-
-     Returns:
-     --------
-       None
-     """
-     model_repository = self.model_config.model_name
-     model_version = self.model_config.model_version
-     repository_path = os.path.join(repository_dir, model_repository)
-     model_version_path = os.path.join(repository_path, model_version)
-
-     if not os.path.isdir(repository_path):
-       os.mkdir(repository_path)
-     self.config_proto.to_file(repository_path)
-     for out_field in self.model_config.output:
-       #predicted int labels must have corresponding names in file
-       if hasattr(out_field, "label_filename"):
-         with open(os.path.join(repository_path, "labels.txt"), "w"):
-           pass
-       else:
-         continue
-     # gen requirements
-     with open(os.path.join(repository_path, "requirements.txt"), "w") as f:
-       f.write("clarifai>9.10.4\ntritonclient[all]") # for model upload utils
-
-     if not os.path.isdir(model_version_path):
-       os.mkdir(model_version_path)
-     if not os.path.exists(os.path.join(model_version_path, "__init__.py")):
-       with open(os.path.join(model_version_path, "__init__.py"), "w"):
-         pass
-     # generate model.py
-     model_py_path = os.path.join(model_version_path, "model.py")
-     self._module_to_file(pb_model, model_py_path, func=None)
-
-     # generate inference.py
-     def insert_model_type_func(x):
-       return x.replace("MODEL_TYPE_PLACEHOLDER", self.model_config.model_type)
-
-     inference_py_path = os.path.join(model_version_path, "inference.py")
-     self._module_to_file(inference, inference_py_path, insert_model_type_func)
-
-     # generate test.py
-     custom_test_path = os.path.join(model_version_path, "test.py")
-     self._module_to_file(test, custom_test_path, insert_model_type_func)
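
Note: this generator presumably backed the removed clarifai-model-upload-init console script (see the deleted entry_points.txt at the end of this diff) and appears to be superseded in 10.1.1 by the new repo_build and cli packages listed above. The sketch below shows roughly how the 10.0.x builder could be driven programmatically, using only calls visible elsewhere in this diff; the model name, the text-to-text type and the target directory are illustrative choices, not values from either wheel.

from clarifai.models.model_serving.model_config import ModelTypes, get_model_config
from clarifai.models.model_serving.pb_model_repository import TritonModelRepository

# Build a Triton config for the chosen model type, then write the repository
# skeleton (serialized config, labels.txt, requirements.txt and the versioned
# model.py / inference.py / test.py stubs shown earlier in this diff).
triton_config = get_model_config(ModelTypes.text_to_text).make_triton_model_config(
    model_name="my-text-model", model_version="1", image_shape=[-1, -1])
TritonModelRepository(triton_config).build_repository(repository_dir="repo_dir")
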

clarifai-10.0.1.dist-info/RECORD (deleted)
@@ -1,103 +0,0 @@
- clarifai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- clarifai/cli.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- clarifai/errors.py,sha256=RwzTajwds51wLD0MVlMC5kcpBnzRpreDLlazPSBZxrg,2605
- clarifai/versions.py,sha256=rCj3CUJhyCdprBtggBLIbWPaQXWUvrUAJoppcOb3WpM,186
- clarifai/client/__init__.py,sha256=xI1U0l5AZdRThvQAXCLsd9axxyFzXXJ22m8LHqVjQRU,662
- clarifai/client/app.py,sha256=AWUngtwF6a0uS9DInf2arz5Aa-vbxt2Ce5WSNvwlAo0,26946
- clarifai/client/base.py,sha256=A_eRK2MfnrVrcM1RXazzPmFuD0Lv2qkEPRrSR-O5Dz4,4909
- clarifai/client/dataset.py,sha256=_ADceLu9YsdfJvZtNQ3Bt3IsZM4A2W0WUFWrz02yd9I,21110
- clarifai/client/input.py,sha256=6Q_GFVNvoUd2sEpoAUwBkvRT-OCKxUmxkanXgGiQP0o,38446
- clarifai/client/lister.py,sha256=03KGMvs5RVyYqxLsSrWhNc34I8kiF1Ph0NeyEwu7nMU,2082
- clarifai/client/model.py,sha256=eOUQ2zPyqdTcYKM8_RJjdLOGJIbuRVvPQhZWtVP-quA,24619
- clarifai/client/module.py,sha256=-FVQ7fxBdcox7S3iGxHBkQuT-w6s00IysunWkLze3Y4,3766
- clarifai/client/runner.py,sha256=nP6QKs8Hy_52skr4gBNAfmPaTcYg20qzZDXnM2IxGlM,9679
- clarifai/client/search.py,sha256=pqX3BJmL8V1RKIGuNkbciDNYGoMwJj3k84B9OvpKl10,10555
- clarifai/client/user.py,sha256=6sOoHiBSHKz6zfEh4cjBbUe5CgmYs96RgHdcMmPoKys,9914
- clarifai/client/workflow.py,sha256=Oqs6X8FocwkDlnYqcvh4qxsWuleTy_-2ByCe2AmsF2M,9983
- clarifai/client/auth/__init__.py,sha256=7EwR0NrozkAUwpUnCsqXvE_p0wqx_SelXlSpKShKJK0,136
- clarifai/client/auth/helper.py,sha256=3lCKo24ZIOlcSh50juJh3ZDagOo_pxEKyoPjWUokYoA,13450
- clarifai/client/auth/register.py,sha256=2CMdBsoVLoTfjyksE6j7BM2tiEc73WKYvxnwDDgNn1k,536
- clarifai/client/auth/stub.py,sha256=KIzJZ8aRB1RzXJeWHDAx19HNdBsblPPHwYLfAkgI3rY,3779
- clarifai/constants/dataset.py,sha256=2QlHF0NMXfAdFlOpEzkNYVZcxSL-dIxq-ZsY_LsIPBA,499
- clarifai/constants/model.py,sha256=LsMkLVkuBpfS4j4yDW9M4O7HxzRpIuSo9qU5T8Wg2Co,217
- clarifai/constants/rag.py,sha256=WcHwToUVIK9ItAhDefaSohQHCLNeR55PSjZ0BFnoZ3U,28
- clarifai/constants/search.py,sha256=_g3S-JEvuygiFfMVK3cl4Ry9erZpt8Zo4ilXL2i3DAE,52
- clarifai/constants/workflow.py,sha256=cECq1xdvf44MCdtK2AbkiuuwhyL-6OWZdQfYbsLKy_o,33
- clarifai/datasets/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- clarifai/datasets/export/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- clarifai/datasets/export/inputs_annotations.py,sha256=z7kmU9K5m9F5u3iEyCnuKk8Bb97kqGaixm8vJZYT554,9325
- clarifai/datasets/upload/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- clarifai/datasets/upload/base.py,sha256=IP4sdBRfThk2l0W1rDWciFrAJnKwVsM-gu4zEslJ2_E,2198
- clarifai/datasets/upload/features.py,sha256=liUMZTg0GtHRTZmfNaf7v7rAbrORAYkiN6uoaFp1rz4,1457
- clarifai/datasets/upload/image.py,sha256=oWsB0iXI1VVbxU1U2r8Q28SyjdBgrwPeQ0g65Zk-jgY,6300
- clarifai/datasets/upload/text.py,sha256=ek29V18x5LqmHqc-nmAljQcud9uRjZx8IV_lDX78zsY,1980
- clarifai/datasets/upload/utils.py,sha256=njSaRJ_OwD1k8dY9HStGpMZ9D9KFOop2qNoAs9ANx9Y,8974
- clarifai/datasets/upload/loaders/README.md,sha256=ag-3lXuvsKTZapvnqBv824rMrVeX0i9U5v1oqhdhvoo,3038
- clarifai/datasets/upload/loaders/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- clarifai/datasets/upload/loaders/coco_captions.py,sha256=t-IaIXukDk1mFdeeqdwe0hLrBLuaF-cZWl2aumGUAls,1297
- clarifai/datasets/upload/loaders/coco_detection.py,sha256=dBYl2a1D7e-N1heXbFK0bImJAuq_lPQ8nxZMa1zq-Ts,2612
- clarifai/datasets/upload/loaders/coco_segmentation.py,sha256=yu9HBHYdKCllF9-6SdQ_2CaKGskE4DdeqCin7zNTN1c,3628
- clarifai/datasets/upload/loaders/imagenet_classification.py,sha256=LuylazxpI5V8fAPGCUxDirGpYMfxzRxix-MEWaCvwxI,1895
- clarifai/datasets/upload/loaders/xview_detection.py,sha256=MN8durZdDhbkczMOz8qHHodqy_0sxPw8gZeZpBAjfZo,6010
- clarifai/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- clarifai/models/api.py,sha256=d3FQQlG0mNDLrfEvchqaVcq4Tgb_TqryNnJtwp3c7sE,10961
- clarifai/models/model_serving/README.md,sha256=hxCmC_erOAto57f2pDlnYG41MTRbgZVwa1RRxoBeClQ,8367
- clarifai/models/model_serving/__init__.py,sha256=Nls28G-fedNw2oQZIkPQSN__TgjJXbG9RDzzuHIM0VI,575
- clarifai/models/model_serving/constants.py,sha256=KFASN7p9tq2JYSREGMCZ5YwS0x0Gy17lR8QrrL-z-9k,18
- clarifai/models/model_serving/pb_model_repository.py,sha256=3x3Rp22euvOaLOQbSf0KkVY5djsWc_85GX9aTSx01B4,3764
- clarifai/models/model_serving/cli/__init__.py,sha256=Nls28G-fedNw2oQZIkPQSN__TgjJXbG9RDzzuHIM0VI,575
- clarifai/models/model_serving/cli/deploy_cli.py,sha256=lDp_hZdHLAo7IWlin9v5CtNuhkKvu5ucTGDzmf3bkE0,4376
- clarifai/models/model_serving/cli/model_zip.py,sha256=TVaHP_4l-WtRn9QJBkhZ7EzaSV9qyhyOUN_h2niNEeE,1866
- clarifai/models/model_serving/cli/repository.py,sha256=EJiatCJfBiZ4Z9AYmyBjagBM8fT-iVeD2_Wwh8S7kQ0,2962
- clarifai/models/model_serving/docs/custom_config.md,sha256=W1mMIRCI4dVP0czvq5fxOLq1hv0PCBMFRvtZtQwz8kU,1522
- clarifai/models/model_serving/docs/dependencies.md,sha256=R9zyE2MbW98DsvFY2ABm4fik0fMPERxrQkhMKMDKpY4,721
- clarifai/models/model_serving/docs/inference_parameters.md,sha256=GWEulNgLICR2q9Nd3UtfAFtrCXOEaW5fyXWdCUJJTq8,4270
- clarifai/models/model_serving/docs/model_types.md,sha256=R1bCGo-i9FBXHpVP57OnTUZxLlcBuJW7V5X6WcLHMUg,1067
- clarifai/models/model_serving/docs/output.md,sha256=IFpwcwhEs7zjIxSDvtYFdmwXVwWal1s5yc-3ohyJEe4,2163
- clarifai/models/model_serving/model_config/__init__.py,sha256=wNmIBwhgQaYlVNKYdDqBgbgdnfP4e9vtqKoDbL4cf34,712
- clarifai/models/model_serving/model_config/config.py,sha256=uwer6SWddknz-Dhocr49i_vBCiUcgv9vW9epU__hX8E,8186
- clarifai/models/model_serving/model_config/inference_parameter.py,sha256=JdrPpEOfbLXXDeEVnlshFC49RdnShGLQ5nmETSFUbWw,3754
- clarifai/models/model_serving/model_config/serializer.py,sha256=d-5liwQ7cOZ1maNKPsX7OZL76_O3YTwbJ7K4E5RwYwI,4226
- clarifai/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml,sha256=TMsEv9RYiafHyvzvQ8cWx3qECvPtilcKDrKJxtzTQOE,475
- clarifai/models/model_serving/model_config/model_types_config/text-classifier.yaml,sha256=zX2CGlyZr0Pg4eIEs3Du-Ymk14fSaMBZUFWVRcNGEm8,358
- clarifai/models/model_serving/model_config/model_types_config/text-embedder.yaml,sha256=JUqKZOR10GhcnBTGNwYYtlO5omhsRrt1N-b7DCBlnfs,340
- clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml,sha256=GepiAaDqSnfEMBoX6oJxgqHefbuUyQGE0RhvfBvnVW8,329
- clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml,sha256=-224qTe32EadryifTPykwQouRsaLT-juwxzzv4Bnvw8,317
- clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml,sha256=vcqcPkJUt-q10UljxW6p4L2gfSOCiMipOPnCiCyausE,370
- clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml,sha256=gqM7jR1Z1RCuGn3M4eGqs3viIjwaD88_L6t7wlSjB2I,720
- clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml,sha256=-kWXlM7m2DBSWcjBF1PWThKTXr8pE1eh8vuml0uBB3o,352
- clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml,sha256=3sG07bc6j8Sbxeos8dU0cJH16ZFDkYd9FGkwXiIVyXc,411
- clarifai/models/model_serving/models/__init__.py,sha256=Nls28G-fedNw2oQZIkPQSN__TgjJXbG9RDzzuHIM0VI,575
- clarifai/models/model_serving/models/default_test.py,sha256=fQ_LSrU8WhxTb0uBV2fWp2VaqZevPPdW-rZ0fDuM8lI,11559
- clarifai/models/model_serving/models/inference.py,sha256=wqWOZedNdTgPXZE9kFXpyn7wg_21h-cjUOnsrBgXGak,1892
- clarifai/models/model_serving/models/model_types.py,sha256=owGtG7lBd2CXCnm_4KXv0umjSiWt34X9JJWm97tbzmY,8636
- clarifai/models/model_serving/models/output.py,sha256=qWlVRxFpqHjlTrrj7Ze3vMniBgqkZhbW1NgX7zNgkfI,3966
- clarifai/models/model_serving/models/pb_model.py,sha256=TJJWY-ExATL4M58gYQTI6vAHgY9OCaOjympeZtGdRj8,2533
- clarifai/models/model_serving/models/test.py,sha256=0FV2yzv1g-yOlvcHDiI6QL89ULMmx04_euXO3ZjkTb0,2280
- clarifai/modules/README.md,sha256=mx8pVx6cPp-pP4LcFPT_nX3ngGmhygVK0WiXeD3cbIo,367
- clarifai/modules/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- clarifai/modules/css.py,sha256=kadCEunmyh5h2yf0-4aysE3ZcZ6qaQcxuAgDXS96yF8,2020
- clarifai/modules/pages.py,sha256=iOoM3RNRMgXlV0qBqcdQofxoXo2RuRQh0h9c9BIS0-I,1383
- clarifai/modules/style.css,sha256=j7FNPZVhLPj35vvBksAJ90RuX5sLuqzDR5iM2WIEhiA,6073
- clarifai/rag/__init__.py,sha256=wu3PzAzo7uqgrEzuaC9lY_3gj1HFiR3GU3elZIKTT5g,40
- clarifai/rag/rag.py,sha256=ZtaV-d4_ztqACzyrkvWq8ZOmcYkYzoYggtehdjCqUOw,10117
- clarifai/rag/utils.py,sha256=lQ9dYd_FlVSuNwa3d18-niIyy_tzhYz2Pr8vB2pUrIQ,3380
- clarifai/runners/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- clarifai/runners/example.py,sha256=V0Nc52JkhCm97oaWzKVg71g50M1ltxI9jyPMo6tKU6E,1302
- clarifai/runners/example_llama2.py,sha256=WMGTqv3v9t3ID1rjW9BTLMkIuvyTESL6xHcOO6A220Y,2712
- clarifai/schema/search.py,sha256=JjTi8ammJgZZ2OGl4K6tIA4zEJ1Fr2ASZARXavI1j5c,2448
- clarifai/urls/helper.py,sha256=tjoMGGHuWX68DUB0pk4MEjrmFsClUAQj2jmVEM_Sy78,4751
- clarifai/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- clarifai/utils/logging.py,sha256=fO0VqJJUZKToKevgKWGZKvqsU9hCDMQTBMzIYtKQnH8,3323
- clarifai/utils/misc.py,sha256=cC_j0eEsJ8bfnj0oRd2z-Rms1mQbAfLwrSs07hwQuCE,1420
- clarifai/utils/model_train.py,sha256=v4-bsPOOi-jxzwDxdNf2exaWPEpKD7BYcc6w0kMds4o,7832
- clarifai/workflows/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- clarifai/workflows/export.py,sha256=vICRhIreqDSShxLKjHNM2JwzKsf1B4fdXB0ciMcA70k,1945
- clarifai/workflows/utils.py,sha256=nGeB_yjVgUO9kOeKTg4OBBaBz-AwXI3m-huSVj-9W18,1924
- clarifai/workflows/validate.py,sha256=iCEKBTtB-57uE3LVU7D4AI9BRHxIxahk3U1Ro08HP-o,2535
- clarifai-10.0.1.dist-info/LICENSE,sha256=mUqF_d12-qE2n41g7C5_sq-BMLOcj6CNN-jevr15YHU,555
- clarifai-10.0.1.dist-info/METADATA,sha256=ff_NiIDsxqOph3Xf9TXed59WJaMaBjG5j-9xzhiY7-M,16510
- clarifai-10.0.1.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
- clarifai-10.0.1.dist-info/entry_points.txt,sha256=cna1vVlFIZZZlxHy1AbhooFGy-dw1W2xRfbOVRSWSKg,255
- clarifai-10.0.1.dist-info/top_level.txt,sha256=wUMdCQGjkxaynZ6nZ9FAnvBUCgp5RJUVFSy2j-KYo0s,9
- clarifai-10.0.1.dist-info/RECORD,,

clarifai-10.0.1.dist-info/entry_points.txt (deleted)
@@ -1,4 +0,0 @@
- [console_scripts]
- clarifai-model-upload-init = clarifai.models.model_serving.cli.repository:model_upload_init
- clarifai-triton-zip = clarifai.models.model_serving.cli.model_zip:main
- clarifai-upload-model = clarifai.models.model_serving.cli.deploy_cli:main