clarifai 9.6.1__py3-none-any.whl → 9.6.3__py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (44)
  1. clarifai/auth/helper.py +3 -3
  2. clarifai/models/model_serving/constants.py +2 -3
  3. clarifai/models/model_serving/examples/text_to_text/bart-summarize/1/__init__.py +0 -0
  4. clarifai/models/model_serving/examples/text_to_text/bart-summarize/1/inference.py +47 -0
  5. clarifai/models/model_serving/examples/text_to_text/bart-summarize/1/model.py +60 -0
  6. clarifai/models/model_serving/examples/visual_embedding/vit-base/1/__init__.py +0 -0
  7. clarifai/models/model_serving/examples/visual_embedding/vit-base/1/inference.py +51 -0
  8. clarifai/models/model_serving/examples/visual_embedding/vit-base/1/model.py +60 -0
  9. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/1/__init__.py +0 -0
  10. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/1/inference.py +55 -0
  11. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/1/model.py +60 -0
  12. clarifai/models/model_serving/model_config/deploy.py +27 -3
  13. clarifai/models/model_serving/model_config/triton_config.py +30 -0
  14. clarifai/models/model_serving/models/model_types.py +122 -0
  15. clarifai/models/model_serving/models/output.py +62 -0
  16. clarifai/models/model_serving/models/pb_model.py +0 -1
  17. clarifai/modules/style.css +7 -0
  18. clarifai/runners/base.py +140 -0
  19. clarifai/runners/example.py +36 -0
  20. {clarifai-9.6.1.dist-info → clarifai-9.6.3.dist-info}/METADATA +1 -1
  21. {clarifai-9.6.1.dist-info → clarifai-9.6.3.dist-info}/RECORD +44 -22
  22. clarifai_utils/auth/helper.py +3 -3
  23. clarifai_utils/models/model_serving/constants.py +2 -3
  24. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/__init__.py +0 -0
  25. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/inference.py +47 -0
  26. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/model.py +60 -0
  27. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/__init__.py +0 -0
  28. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/inference.py +51 -0
  29. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/model.py +60 -0
  30. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/__init__.py +0 -0
  31. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/inference.py +55 -0
  32. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/model.py +60 -0
  33. clarifai_utils/models/model_serving/model_config/deploy.py +27 -3
  34. clarifai_utils/models/model_serving/model_config/triton_config.py +30 -0
  35. clarifai_utils/models/model_serving/models/model_types.py +122 -0
  36. clarifai_utils/models/model_serving/models/output.py +62 -0
  37. clarifai_utils/models/model_serving/models/pb_model.py +0 -1
  38. clarifai_utils/modules/style.css +7 -0
  39. clarifai_utils/runners/base.py +140 -0
  40. clarifai_utils/runners/example.py +36 -0
  41. {clarifai-9.6.1.dist-info → clarifai-9.6.3.dist-info}/LICENSE +0 -0
  42. {clarifai-9.6.1.dist-info → clarifai-9.6.3.dist-info}/WHEEL +0 -0
  43. {clarifai-9.6.1.dist-info → clarifai-9.6.3.dist-info}/entry_points.txt +0 -0
  44. {clarifai-9.6.1.dist-info → clarifai-9.6.3.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,60 @@
+ # Copyright 2023 Clarifai, Inc.
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Triton inference server Python Backend Model."""
+
+ import os
+ import sys
+
+ try:
+   import triton_python_backend_utils as pb_utils
+ except ModuleNotFoundError:
+   pass
+ from google.protobuf import text_format
+ from tritonclient.grpc.model_config_pb2 import ModelConfig
+
+
+ class TritonPythonModel:
+   """
+   Triton Python BE Model.
+   """
+
+   def initialize(self, args):
+     """
+     Triton server init.
+     """
+     args["model_repository"] = args["model_repository"].replace("/1/model.py", "")
+     sys.path.append(os.path.dirname(__file__))
+     from inference import InferenceModel
+
+     self.inference_obj = InferenceModel()
+
+     # Read input_name from config file
+     self.config_msg = ModelConfig()
+     with open(os.path.join(args["model_repository"], "config.pbtxt"), "r") as f:
+       cfg = f.read()
+     text_format.Merge(cfg, self.config_msg)
+     self.input_name = [inp.name for inp in self.config_msg.input][0]
+
+   def execute(self, requests):
+     """
+     Serve model inference requests.
+     """
+     responses = []
+
+     for request in requests:
+       in_batch = pb_utils.get_input_tensor_by_name(request, self.input_name)
+       in_batch = in_batch.as_numpy()
+       inference_response = self.inference_obj.get_predictions(in_batch)
+       responses.append(inference_response)
+
+     return responses
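
This new model.py (one of the 60-line example wrappers listed above) is a thin Triton shim: initialize() appends the model directory to sys.path and imports an InferenceModel class from a sibling inference.py, and execute() feeds each request batch to its get_predictions method. A minimal sketch of that counterpart follows; the class and method names come from the import above, while the decorator choice and the body are illustrative assumptions (the shipped examples such as bart-summarize implement real models behind the same interface):

    # inference.py -- illustrative sketch, not the shipped example code.
    import numpy as np

    from clarifai.models.model_serving.models.model_types import text_to_text
    from clarifai.models.model_serving.models.output import TextOutput


    class InferenceModel:
      """Instantiated once in TritonPythonModel.initialize(); called per batch in execute()."""

      @text_to_text  # output parser decorator from model_types.py (see its hunk below)
      def get_predictions(self, input_data: str) -> TextOutput:
        # Echo the input; a real model would generate or summarize text here.
        return TextOutput(predicted_text=np.asarray([input_data], dtype=object))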
@@ -36,16 +36,40 @@ class ClarifaiFieldsMap:
      """
      Set mapping of clarifai in/output vs triton in/output
      """
+     text_input_fields = {"text": "text"}
+     image_input_fields = {"image": "image"}
+
+     embedding_output_fields = {"embeddings": "embeddings"}
+
      if self.model_type == "visual-detector":
-       self.input_fields_map = {"image": "image"}
+       self.input_fields_map = image_input_fields
        self.output_fields_map = {
            "regions[...].region_info.bounding_box": "predicted_bboxes",
            "regions[...].data.concepts[...].id": "predicted_labels",
            "regions[...].data.concepts[...].value": "predicted_scores"
        }
      elif self.model_type == "visual-classifier":
-       self.input_fields_map = {"image": "image"}
+       self.input_fields_map = image_input_fields
        self.output_fields_map = {"concepts": "softmax_predictions"}
      elif self.model_type == "text-classifier":
-       self.input_fields_map = {"text": "text"}
+       self.input_fields_map = text_input_fields
        self.output_fields_map = {"concepts": "softmax_predictions"}
+     elif self.model_type == "text-embedder":
+       self.input_fields_map = text_input_fields
+       self.output_fields_map = embedding_output_fields
+     elif self.model_type == "text-to-text":
+       self.input_fields_map = text_input_fields
+       # input and output fields are the same for text-to-text
+       self.output_fields_map = text_input_fields
+     elif self.model_type == "text-to-image":
+       self.input_fields_map = text_input_fields
+       # image output fields match image input fields
+       self.output_fields_map = image_input_fields
+     elif self.model_type == "visual-embedder":
+       self.input_fields_map = image_input_fields
+       self.output_fields_map = embedding_output_fields
+     elif self.model_type == "visual-segmenter":
+       self.input_fields_map = image_input_fields
+       self.output_fields_map = {
+           "regions[...].region_info.mask,regions[...].data.concepts": "predicted_mask"
+       }
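
This hunk (ClarifaiFieldsMap, whose +27/-3 counts match deploy.py in the file list) factors the repeated field dicts into shared variables and adds mappings for the new model types. For illustration, a small usage sketch; the constructor signature is an assumption inferred from the self.model_type checks above:

    # Hypothetical usage of the mapping above; model_type= is an assumed constructor argument.
    fields = ClarifaiFieldsMap(model_type="text-embedder")
    print(fields.input_fields_map)   # {"text": "text"}
    print(fields.output_fields_map)  # {"embeddings": "embeddings"}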
@@ -163,3 +163,33 @@ class TritonModelConfig:
        # with each value being the confidence for the respective model output.
        del pred_labels.labels
        self.output.append(pred_labels)
+
+     elif self.model_type == "text-to-text":
+       self.input.append(text_input)
+       pred_text = OutputConfig(name="text", data_type="TYPE_STRING", dims=[1], labels=False)
+       self.output.append(pred_text)
+
+     elif self.model_type == "text-embedder":
+       self.input.append(text_input)
+       embedding_vector = OutputConfig(
+           name="embeddings", data_type="TYPE_FP32", dims=[-1], labels=False)
+       self.output.append(embedding_vector)
+
+     elif self.model_type == "text-to-image":
+       self.input.append(text_input)
+       gen_image = OutputConfig(
+           name="image", data_type="TYPE_UINT8", dims=[-1, -1, 3], labels=False)
+       self.output.append(gen_image)
+
+     elif self.model_type == "visual-embedder":
+       self.input.append(image_input)
+       embedding_vector = OutputConfig(
+           name="embeddings", data_type="TYPE_FP32", dims=[-1], labels=False)
+       self.output.append(embedding_vector)
+
+     elif self.model_type == "visual-segmenter":
+       self.input.append(image_input)
+       pred_masks = OutputConfig(
+           name="predicted_mask", data_type="TYPE_INT64", dims=[-1, -1], labels=True)
+       del pred_masks.labels
+       self.output.append(pred_masks)
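
Each new branch registers one Triton output for its model type. For orientation, the dims declared above imply these per-input output shapes (a sketch; -1 marks a variable-length axis):

    # Expected per-input output shapes implied by the dims above (sketch only).
    EXPECTED_SHAPES = {
        "text-to-text": (1,),           # one generated string
        "text-embedder": (-1,),         # variable-length float32 vector
        "text-to-image": (-1, -1, 3),   # H x W x RGB uint8 image
        "visual-embedder": (-1,),       # variable-length float32 vector
        "visual-segmenter": (-1, -1),   # H x W int64 label mask
    }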
@@ -112,3 +112,125 @@ def text_classifier(func: Callable):
      return inference_response

    return parse_predictions
+
+
+ def text_to_text(func: Callable):
+   """
+   Text to text type output parser.
+   Convert a sequence of text into another e.g. text generation,
+   summarization or translation.
+   """
+
+   @wraps(func)
+   def parse_predictions(self, input_data: np.ndarray):
+     """
+     Format predictions and return clarifai compatible output.
+     """
+     out_text = []
+     input_data = [in_elem[0].decode() for in_elem in input_data]
+     for item in input_data:
+       preds = func(self, item)
+       out_text.append(preds.predicted_text)
+
+     out_text_tensor = pb_utils.Tensor("text", np.asarray(out_text, dtype=object))
+     inference_response = pb_utils.InferenceResponse(output_tensors=[out_text_tensor])
+
+     return inference_response
+
+   return parse_predictions
+
+
+ def text_embedder(func: Callable):
+   """
+   Text embedder type output parser.
+   Generates embeddings for an input text.
+   """
+
+   @wraps(func)
+   def parse_predictions(self, input_data: np.ndarray):
+     """
+     Format predictions and return clarifai compatible output.
+     """
+     out_embeddings = []
+     input_data = [in_elem[0].decode() for in_elem in input_data]
+     for item in input_data:
+       preds = func(self, item)
+       out_embeddings.append(preds.embedding_vector)
+
+     out_embed_tensor = pb_utils.Tensor("embeddings", np.asarray(out_embeddings, dtype=np.float32))
+     inference_response = pb_utils.InferenceResponse(output_tensors=[out_embed_tensor])
+
+     return inference_response
+
+   return parse_predictions
+
+
+ def visual_embedder(func: Callable):
+   """
+   Visual embedder type output parser.
+   Generates embeddings for an input image.
+   """
+
+   @wraps(func)
+   def parse_predictions(self, input_data: np.ndarray):
+     """
+     Format predictions and return clarifai compatible output.
+     """
+     out_embeddings = []
+     for item in input_data:
+       preds = func(self, item)
+       out_embeddings.append(preds.embedding_vector)
+
+     out_embed_tensor = pb_utils.Tensor("embeddings", np.asarray(out_embeddings, dtype=np.float32))
+     inference_response = pb_utils.InferenceResponse(output_tensors=[out_embed_tensor])
+
+     return inference_response
+
+   return parse_predictions
+
+
+ def visual_segmenter(func: Callable):
+   """
+   Visual segmenter type output parser.
+   """
+
+   @wraps(func)
+   def parse_predictions(self, input_data: np.ndarray):
+     """
+     Format predictions and return clarifai compatible output.
+     """
+     masks = []
+     for item in input_data:
+       preds = func(self, item)
+       masks.append(preds.predicted_mask)
+
+     out_mask_tensor = pb_utils.Tensor("predicted_mask", np.asarray(masks, dtype=np.int64))
+     inference_response = pb_utils.InferenceResponse(output_tensors=[out_mask_tensor])
+
+     return inference_response
+
+   return parse_predictions
+
+
+ def text_to_image(func: Callable):
+   """
+   Text to image type output parser.
+   """
+
+   @wraps(func)
+   def parse_predictions(self, input_data: np.ndarray):
+     """
+     Format predictions and return clarifai compatible output.
+     """
+     gen_images = []
+     input_data = [in_elem[0].decode() for in_elem in input_data]
+     for item in input_data:
+       preds = func(self, item)
+       gen_images.append(preds.image)
+
+     out_image_tensor = pb_utils.Tensor("image", np.asarray(gen_images, dtype=np.uint8))
+     inference_response = pb_utils.InferenceResponse(output_tensors=[out_image_tensor])
+
+     return inference_response
+
+   return parse_predictions
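
All five parsers share one shape: decode the batch where the input is text, call the wrapped method once per item, collect the typed outputs, and pack a single pb_utils.InferenceResponse. A hedged sketch of wiring one up (the 384-dim zero vector is a placeholder; EmbeddingOutput comes from output.py, shown in the next hunk):

    import numpy as np

    from clarifai.models.model_serving.models.model_types import text_embedder
    from clarifai.models.model_serving.models.output import EmbeddingOutput


    class InferenceModel:

      @text_embedder  # the parser decodes and batches; this method sees one string at a time
      def get_predictions(self, input_data: str) -> EmbeddingOutput:
        # Placeholder embedding; a real model would encode input_data here.
        return EmbeddingOutput(embedding_vector=np.zeros(384, dtype=np.float32))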
@@ -59,3 +59,65 @@ class ClassifierOutput:
      """
      assert self.predicted_scores.ndim == 1, \
        f"All predictions must be 1-dimensional, Got scores-dims: {self.predicted_scores.ndim} instead."
+
+
+ @dataclass
+ class TextOutput:
+   """
+   Takes model text predictions
+   """
+   predicted_text: np.ndarray
+
+   def __post_init__(self):
+     """
+     Validate input upon initialization.
+     """
+     assert self.predicted_text.ndim == 1, \
+       f"All predictions must be 1-dimensional, Got text-dims: {self.predicted_text.ndim} instead."
+
+
+ @dataclass
+ class EmbeddingOutput:
+   """
+   Takes embedding vector returned by a model.
+   """
+   embedding_vector: np.ndarray
+
+   def __post_init__(self):
+     """
+     Validate input upon initialization.
+     """
+     assert self.embedding_vector.ndim == 1, \
+       f"Embeddings must be 1-dimensional, Got embedding-dims: {self.embedding_vector.ndim} instead."
+
+
+ @dataclass
+ class MasksOutput:
+   """
+   Takes image segmentation masks returned by a model.
+   """
+   predicted_mask: np.ndarray
+
+   def __post_init__(self):
+     """
+     Validate input upon initialization.
+     """
+     assert self.predicted_mask.ndim == 2, \
+       f"predicted_mask must be 2-dimensional, Got mask dims: {self.predicted_mask.ndim} instead."
+
+
+ @dataclass
+ class ImageOutput:
+   """
+   Takes a predicted/generated image array as returned by a model.
+   """
+   image: np.ndarray
+
+   def __post_init__(self):
+     """
+     Validate input upon initialization.
+     """
+     assert self.image.ndim == 3, \
+       f"Generated image must be 3-dimensional, Got image-dims: {self.image.ndim} instead."
+     assert self.image.shape[2] == 3, \
+       f"The image channels dimension must equal 3, Got channel dim: {self.image.shape[2]} instead."
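
Each dataclass validates its array's shape in __post_init__, so a malformed prediction fails loudly at construction instead of surfacing as an opaque Triton serialization error. A minimal usage sketch:

    import numpy as np

    from clarifai.models.model_serving.models.output import EmbeddingOutput, ImageOutput

    EmbeddingOutput(embedding_vector=np.zeros(512, dtype=np.float32))  # passes: 1-D vector
    ImageOutput(image=np.zeros((64, 64, 3), dtype=np.uint8))           # passes: H x W x 3
    # ImageOutput(image=np.zeros((64, 64, 4), dtype=np.uint8))  # raises AssertionError: 4 channels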
@@ -37,7 +37,6 @@ class TritonPythonModel:
      from inference import InferenceModel

      self.inference_obj = InferenceModel()
-     self.device = "cuda:0" if "GPU" in args["model_instance_kind"] else "cpu"

      # Read input_name from config file
      self.config_msg = ModelConfig()
@@ -203,6 +203,13 @@ code
    border-color: #d0d5dd;
    box-shadow: 0 1px 2px rgba(16,24,40,.05);
  }
+
+ .stTextInput > div > div > input {
+   background-color: white;
+ }
+
+
+
  div[data-testid="stFileUploader"]>section {
    border: 1px dashed #d0d5dd;
    border-radius: 8px;
@@ -0,0 +1,140 @@
+ # Copyright 2023 Clarifai, Inc.
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Interface to Clarifai Runners API."""
+
+ from typing import Type
+
+ from clarifai_grpc.grpc.api import resources_pb2, service_pb2
+ from clarifai_grpc.grpc.api.status import status_code_pb2, status_pb2
+ from google.protobuf import json_format
+
+ from clarifai.auth.helper import ClarifaiAuthHelper
+ from clarifai.client import create_stub
+
+
+ class BaseRunner:
+   """
+   Base class for remote inference runners. This should be subclassed with the run_input method
+   implemented to process each input in the request.
+
+   Then on the subclass call start() to start the run loop.
+   """
+
+   def __init__(self, auth: Type[ClarifaiAuthHelper], runner_id: str) -> None:
+     """
+     Args:
+       auth: ClarifaiAuthHelper - the auth object to use
+       runner_id: str - the id of the runner to use. Create the runner in the Clarifai API first
+
+     """
+     self.auth = auth
+     self.stub = create_stub(self.auth)
+     self.runner_id = runner_id
+
+     # Check that the runner exists.
+     response = self.stub.GetRunner(
+         service_pb2.GetRunnerRequest(
+             user_app_id=self.auth.get_user_app_id_proto(), runner_id=self.runner_id))
+     if response.status.code != status_code_pb2.SUCCESS:
+       raise Exception(
+           f"Error getting runner, are you sure this is a valid runner id {runner_id} at the user_id/app_id {self.auth.get_user_app_id_proto().user_id}/{self.auth.get_user_app_id_proto().app_id}? Error: {response.status.description}"
+       )
+
+   def start(self):
+     """
+     Start the run loop. This will ask the Clarifai API for work, and when it gets work, it will run
+     the model on the inputs and post the results back to the Clarifai API. It will then ask for more
+     work again.
+     """
+     self._long_poll_loop()
+
+   def _run(self, request: service_pb2.PostModelOutputsRequest) -> service_pb2.MultiOutputResponse:
+     """
+     Run the model on the given request. You shouldn't need to override this method; see run_input
+     for the implementation to process each input in the request.
+
+     Args:
+       request: service_pb2.PostModelOutputsRequest - the request to run the model on
+
+     Returns:
+       service_pb2.MultiOutputResponse - the response from the model's run_input implementation.
+     """
+     outputs = []
+     # TODO: parallelize this
+     for inp in request.inputs:
+       # TODO: handle errors
+       outputs.append(self.run_input(inp))
+
+     return service_pb2.MultiOutputResponse(
+         status=status_pb2.Status(
+             code=status_code_pb2.SUCCESS,
+             description="Success",
+         ),
+         outputs=outputs,
+     )
+
+   def run_input(self, input: resources_pb2.Input) -> resources_pb2.Output:
+     """
+     Run the model on the given input in the request. This is the method you should override to
+     process each input in the request.
+
+     Args:
+       input: resources_pb2.Input - the input to run the model on
+
+     Returns:
+       resources_pb2.Output - the output for the given input.
+     """
+     raise NotImplementedError("run_input() not implemented")
+
+   def _long_poll_loop(self):
+     """
+     This method will long poll for work, and when it gets work, it will run the model on the inputs
+     and post the results back to the Clarifai API. It will then long poll again for more work.
+     """
+     c = 0
+     # TODO: handle more errors within this loop so it never stops.
+     # TODO: perhaps have multiple processes running this loop to handle more work.
+     while True:
+       # Long poll waiting for work.
+       print("Loop iteration: {}".format(c))
+       work_response = self.stub.ListRunnerItems(
+           service_pb2.ListRunnerItemsRequest(
+               user_app_id=self.auth.get_user_app_id_proto(), runner_id=self.runner_id))
+       if work_response.status.code == status_code_pb2.RUNNER_NEEDS_RETRY:
+         c += 1
+         continue  # immediately restart the long poll
+       if work_response.status.code != status_code_pb2.SUCCESS:
+         raise Exception("Error getting work: {}".format(work_response.status.description))
+       if len(work_response.items) == 0:
+         print("No work to do. Waiting...")
+         continue
+
+       # We have work to do. Run the model on the inputs.
+       for item in work_response.items:
+         if not item.HasField('post_model_outputs_request'):
+           raise Exception("Unexpected work item type: {}".format(item))
+         print(
+             f"Working on item: {item.id} with inputs {len(item.post_model_outputs_request.inputs)}"
+         )
+         result = self._run(item.post_model_outputs_request)
+
+         result_response = self.stub.PostRunnerItemOutputs(
+             service_pb2.PostRunnerItemOutputsRequest(
+                 user_app_id=self.auth.get_user_app_id_proto(),
+                 item_id=item.id,
+                 runner_id=self.runner_id,
+                 runner_item_outputs=[service_pb2.RunnerItemOutput(multi_output_response=result)]))
+         if result_response.status.code != status_code_pb2.SUCCESS:
+           raise Exception(
+               json_format.MessageToJson(result_response, preserving_proto_field_name=True))
+           # raise Exception("Error posting result: {}".format(result_response.status.description))
@@ -0,0 +1,36 @@
+ from clarifai_grpc.grpc.api import resources_pb2
+
+ from clarifai.auth.helper import ClarifaiAuthHelper
+ from clarifai.runners.base import BaseRunner
+
+
+ class MyRunner(BaseRunner):
+   """ A custom runner that adds "Hello World" to the end of the text and replaces the domain of the
+   image URL as an example.
+   """
+
+   def run_input(self, input: resources_pb2.Input) -> resources_pb2.Output:
+     """ This is the method that will be called when the runner is run. It takes in an input and
+     returns an output.
+     """
+
+     output = resources_pb2.Output()
+
+     data = input.data
+
+     if data.text.raw != "":
+       output.data.text.raw = data.text.raw + "Hello World"
+     if data.image.url != "":
+       output.data.text.raw = data.image.url.replace("samples.clarifai.com", "newdomain.com")
+     return output
+
+
+ if __name__ == '__main__':
+   # Make sure you set these env vars before running the example.
+   # CLARIFAI_PAT
+   # CLARIFAI_USER_ID
+   # CLARIFAI_APP_ID
+   auth = ClarifaiAuthHelper.from_env()
+
+   # You need to first create a runner in the Clarifai API and then use the ID here.
+   MyRunner(auth, runner_id="laptop_runner").start()
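
ClarifaiAuthHelper.from_env() picks up the three environment variables named in the comments above. If you prefer setting them in-process while experimenting, a sketch (the values are placeholders):

    import os

    os.environ["CLARIFAI_PAT"] = "YOUR_PERSONAL_ACCESS_TOKEN"
    os.environ["CLARIFAI_USER_ID"] = "YOUR_USER_ID"
    os.environ["CLARIFAI_APP_ID"] = "YOUR_APP_ID"

    from clarifai.auth.helper import ClarifaiAuthHelper
    auth = ClarifaiAuthHelper.from_env()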