clarifai 11.1.5-py3-none-any.whl → 11.1.5rc2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123)
  1. clarifai/__init__.py +1 -1
  2. clarifai/__pycache__/__init__.cpython-310.pyc +0 -0
  3. clarifai/__pycache__/errors.cpython-310.pyc +0 -0
  4. clarifai/__pycache__/versions.cpython-310.pyc +0 -0
  5. clarifai/cli/__main__.py~ +4 -0
  6. clarifai/cli/__pycache__/__init__.cpython-310.pyc +0 -0
  7. clarifai/cli/__pycache__/__main__.cpython-310.pyc +0 -0
  8. clarifai/cli/__pycache__/base.cpython-310.pyc +0 -0
  9. clarifai/cli/__pycache__/compute_cluster.cpython-310.pyc +0 -0
  10. clarifai/cli/__pycache__/deployment.cpython-310.pyc +0 -0
  11. clarifai/cli/__pycache__/model.cpython-310.pyc +0 -0
  12. clarifai/cli/__pycache__/nodepool.cpython-310.pyc +0 -0
  13. clarifai/cli/model.py +25 -0
  14. clarifai/client/__pycache__/__init__.cpython-310.pyc +0 -0
  15. clarifai/client/__pycache__/app.cpython-310.pyc +0 -0
  16. clarifai/client/__pycache__/base.cpython-310.pyc +0 -0
  17. clarifai/client/__pycache__/dataset.cpython-310.pyc +0 -0
  18. clarifai/client/__pycache__/input.cpython-310.pyc +0 -0
  19. clarifai/client/__pycache__/lister.cpython-310.pyc +0 -0
  20. clarifai/client/__pycache__/model.cpython-310.pyc +0 -0
  21. clarifai/client/__pycache__/module.cpython-310.pyc +0 -0
  22. clarifai/client/__pycache__/runner.cpython-310.pyc +0 -0
  23. clarifai/client/__pycache__/search.cpython-310.pyc +0 -0
  24. clarifai/client/__pycache__/user.cpython-310.pyc +0 -0
  25. clarifai/client/__pycache__/workflow.cpython-310.pyc +0 -0
  26. clarifai/client/auth/__pycache__/__init__.cpython-310.pyc +0 -0
  27. clarifai/client/auth/__pycache__/helper.cpython-310.pyc +0 -0
  28. clarifai/client/auth/__pycache__/register.cpython-310.pyc +0 -0
  29. clarifai/client/auth/__pycache__/stub.cpython-310.pyc +0 -0
  30. clarifai/client/model.py +89 -365
  31. clarifai/client/model_client.py +422 -0
  32. clarifai/constants/__pycache__/dataset.cpython-310.pyc +0 -0
  33. clarifai/constants/__pycache__/model.cpython-310.pyc +0 -0
  34. clarifai/constants/__pycache__/search.cpython-310.pyc +0 -0
  35. clarifai/datasets/__pycache__/__init__.cpython-310.pyc +0 -0
  36. clarifai/datasets/export/__pycache__/__init__.cpython-310.pyc +0 -0
  37. clarifai/datasets/export/__pycache__/inputs_annotations.cpython-310.pyc +0 -0
  38. clarifai/datasets/upload/__pycache__/__init__.cpython-310.pyc +0 -0
  39. clarifai/datasets/upload/__pycache__/base.cpython-310.pyc +0 -0
  40. clarifai/datasets/upload/__pycache__/features.cpython-310.pyc +0 -0
  41. clarifai/datasets/upload/__pycache__/image.cpython-310.pyc +0 -0
  42. clarifai/datasets/upload/__pycache__/text.cpython-310.pyc +0 -0
  43. clarifai/datasets/upload/__pycache__/utils.cpython-310.pyc +0 -0
  44. clarifai/datasets/upload/loaders/__pycache__/__init__.cpython-310.pyc +0 -0
  45. clarifai/datasets/upload/loaders/__pycache__/coco_detection.cpython-310.pyc +0 -0
  46. clarifai/models/__pycache__/__init__.cpython-310.pyc +0 -0
  47. clarifai/models/model_serving/__pycache__/__init__.cpython-310.pyc +0 -0
  48. clarifai/models/model_serving/__pycache__/constants.cpython-310.pyc +0 -0
  49. clarifai/models/model_serving/cli/__pycache__/__init__.cpython-310.pyc +0 -0
  50. clarifai/models/model_serving/cli/__pycache__/_utils.cpython-310.pyc +0 -0
  51. clarifai/models/model_serving/cli/__pycache__/base.cpython-310.pyc +0 -0
  52. clarifai/models/model_serving/cli/__pycache__/build.cpython-310.pyc +0 -0
  53. clarifai/models/model_serving/cli/__pycache__/create.cpython-310.pyc +0 -0
  54. clarifai/models/model_serving/model_config/__pycache__/__init__.cpython-310.pyc +0 -0
  55. clarifai/models/model_serving/model_config/__pycache__/base.cpython-310.pyc +0 -0
  56. clarifai/models/model_serving/model_config/__pycache__/config.cpython-310.pyc +0 -0
  57. clarifai/models/model_serving/model_config/__pycache__/inference_parameter.cpython-310.pyc +0 -0
  58. clarifai/models/model_serving/model_config/__pycache__/output.cpython-310.pyc +0 -0
  59. clarifai/models/model_serving/model_config/triton/__pycache__/__init__.cpython-310.pyc +0 -0
  60. clarifai/models/model_serving/model_config/triton/__pycache__/serializer.cpython-310.pyc +0 -0
  61. clarifai/models/model_serving/model_config/triton/__pycache__/triton_config.cpython-310.pyc +0 -0
  62. clarifai/models/model_serving/model_config/triton/__pycache__/wrappers.cpython-310.pyc +0 -0
  63. clarifai/models/model_serving/repo_build/__pycache__/__init__.cpython-310.pyc +0 -0
  64. clarifai/models/model_serving/repo_build/__pycache__/build.cpython-310.pyc +0 -0
  65. clarifai/models/model_serving/repo_build/static_files/__pycache__/base_test.cpython-310-pytest-7.2.0.pyc +0 -0
  66. clarifai/rag/__pycache__/__init__.cpython-310.pyc +0 -0
  67. clarifai/rag/__pycache__/rag.cpython-310.pyc +0 -0
  68. clarifai/rag/__pycache__/utils.cpython-310.pyc +0 -0
  69. clarifai/runners/__init__.py +2 -7
  70. clarifai/runners/__pycache__/__init__.cpython-310.pyc +0 -0
  71. clarifai/runners/__pycache__/server.cpython-310.pyc +0 -0
  72. clarifai/runners/dockerfile_template/Dockerfile.debug +11 -0
  73. clarifai/runners/dockerfile_template/Dockerfile.debug~ +9 -0
  74. clarifai/runners/dockerfile_template/Dockerfile.template +3 -0
  75. clarifai/runners/models/__pycache__/__init__.cpython-310.pyc +0 -0
  76. clarifai/runners/models/__pycache__/base_typed_model.cpython-310.pyc +0 -0
  77. clarifai/runners/models/__pycache__/model_builder.cpython-310.pyc +0 -0
  78. clarifai/runners/models/__pycache__/model_class.cpython-310.pyc +0 -0
  79. clarifai/runners/models/__pycache__/model_run_locally.cpython-310.pyc +0 -0
  80. clarifai/runners/models/__pycache__/model_runner.cpython-310.pyc +0 -0
  81. clarifai/runners/models/__pycache__/model_servicer.cpython-310.pyc +0 -0
  82. clarifai/runners/models/__pycache__/model_upload.cpython-310.pyc +0 -0
  83. clarifai/runners/models/model_builder.py +33 -7
  84. clarifai/runners/models/model_class.py +269 -28
  85. clarifai/runners/models/model_run_locally.py +3 -78
  86. clarifai/runners/models/model_runner.py +2 -0
  87. clarifai/runners/models/model_servicer.py +11 -2
  88. clarifai/runners/server.py +5 -1
  89. clarifai/runners/utils/__pycache__/__init__.cpython-310.pyc +0 -0
  90. clarifai/runners/utils/__pycache__/const.cpython-310.pyc +0 -0
  91. clarifai/runners/utils/__pycache__/data_handler.cpython-310.pyc +0 -0
  92. clarifai/runners/utils/__pycache__/data_types.cpython-310.pyc +0 -0
  93. clarifai/runners/utils/__pycache__/data_utils.cpython-310.pyc +0 -0
  94. clarifai/runners/utils/__pycache__/loader.cpython-310.pyc +0 -0
  95. clarifai/runners/utils/__pycache__/logging.cpython-310.pyc +0 -0
  96. clarifai/runners/utils/__pycache__/method_signatures.cpython-310.pyc +0 -0
  97. clarifai/runners/utils/__pycache__/serializers.cpython-310.pyc +0 -0
  98. clarifai/runners/utils/__pycache__/url_fetcher.cpython-310.pyc +0 -0
  99. clarifai/runners/utils/data_handler.py +308 -205
  100. clarifai/runners/utils/data_types.py +334 -0
  101. clarifai/runners/utils/method_signatures.py +452 -0
  102. clarifai/runners/utils/serializers.py +132 -0
  103. clarifai/schema/__pycache__/search.cpython-310.pyc +0 -0
  104. clarifai/urls/__pycache__/helper.cpython-310.pyc +0 -0
  105. clarifai/utils/__pycache__/__init__.cpython-310.pyc +0 -0
  106. clarifai/utils/__pycache__/logging.cpython-310.pyc +0 -0
  107. clarifai/utils/__pycache__/misc.cpython-310.pyc +0 -0
  108. clarifai/utils/__pycache__/model_train.cpython-310.pyc +0 -0
  109. clarifai/utils/evaluation/__pycache__/__init__.cpython-310.pyc +0 -0
  110. clarifai/utils/evaluation/__pycache__/helpers.cpython-310.pyc +0 -0
  111. clarifai/utils/evaluation/__pycache__/main.cpython-310.pyc +0 -0
  112. clarifai/workflows/__pycache__/__init__.cpython-310.pyc +0 -0
  113. clarifai/workflows/__pycache__/export.cpython-310.pyc +0 -0
  114. clarifai/workflows/__pycache__/utils.cpython-310.pyc +0 -0
  115. clarifai/workflows/__pycache__/validate.cpython-310.pyc +0 -0
  116. {clarifai-11.1.5.dist-info → clarifai-11.1.5rc2.dist-info}/METADATA +16 -26
  117. clarifai-11.1.5rc2.dist-info/RECORD +203 -0
  118. {clarifai-11.1.5.dist-info → clarifai-11.1.5rc2.dist-info}/WHEEL +1 -1
  119. clarifai/runners/models/base_typed_model.py +0 -238
  120. clarifai-11.1.5.dist-info/RECORD +0 -101
  121. {clarifai-11.1.5.dist-info → clarifai-11.1.5rc2.dist-info}/LICENSE +0 -0
  122. {clarifai-11.1.5.dist-info → clarifai-11.1.5rc2.dist-info}/entry_points.txt +0 -0
  123. {clarifai-11.1.5.dist-info → clarifai-11.1.5rc2.dist-info}/top_level.txt +0 -0
clarifai/client/model_client.py (new file, +422 lines)
@@ -0,0 +1,422 @@
import inspect
import logging
import time
from typing import Any, Dict, Iterator, List

from clarifai_grpc.grpc.api import resources_pb2, service_pb2
from clarifai_grpc.grpc.api.status import status_code_pb2

from clarifai.constants.model import MAX_MODEL_PREDICT_INPUTS
from clarifai.errors import UserError
from clarifai.runners.utils.method_signatures import (deserialize, get_stream_from_signature,
                                                      serialize, signatures_from_json,
                                                      unflatten_nested_keys)
from clarifai.utils.misc import BackoffIterator, status_is_retryable


class ModelClient:
  '''
  Client for calling model predict, generate, and stream methods.
  '''

  def __init__(self, stub, request_template: service_pb2.PostModelOutputsRequest = None):
    '''
    Initialize the model client.

    Args:
      stub: The gRPC stub for the model.
      request_template: The template for the request to send to the model, including
          common fields like model_id, model_version, cluster, etc.
    '''
    self.STUB = stub
    self.request_template = request_template or service_pb2.PostModelOutputsRequest()
    self.logger = logging.getLogger(__name__)  # used by the retry loops below
    self._fetch_signatures()
    self._define_functions()

  def _fetch_signatures(self):
    '''
    Fetch the method signatures from the model.

    Returns:
      Dict: The method signatures.
    '''
    #request = resources_pb2.GetModelSignaturesRequest()
    #response = self.stub.GetModelSignatures(request)
    #self._method_signatures = json.loads(response.signatures)  # or define protos
    # TODO this could use a new endpoint to get the signatures
    # for local grpc models, we'll also have to add the endpoint to the model servicer
    # for now we'll just use the predict endpoint with a special method name

    request = service_pb2.PostModelOutputsRequest()
    request.CopyFrom(self.request_template)
    request.model.model_version.output_info.params['_method_name'] = '_GET_SIGNATURES'
    request.inputs.add()  # empty input for this method
    start_time = time.time()
    backoff_iterator = BackoffIterator(10)
    while True:
      response = self.STUB.PostModelOutputs(request)
      if status_is_retryable(
          response.status.code) and time.time() - start_time < 60 * 10:  # 10 minutes
        self.logger.info(f"Retrying model info fetch with response {response.status!r}")
        time.sleep(next(backoff_iterator))
        continue
      break
    if response.status.code == status_code_pb2.INPUT_UNSUPPORTED_FORMAT:
      # return code from older models that don't support _GET_SIGNATURES
      self._method_signatures = {}
      return
    if response.status.code != status_code_pb2.SUCCESS:
      raise Exception(f"Model failed with response {response.status!r}")
    self._method_signatures = signatures_from_json(response.outputs[0].data.string_value)

  def _define_functions(self):
    '''
    Define the functions based on the method signatures.
    '''
    for method_name, method_signature in self._method_signatures.items():
      # define the function in this client instance
      if method_signature.method_type == 'predict':
        call_func = self._predict
      elif method_signature.method_type == 'generate':
        call_func = self._generate
      elif method_signature.method_type == 'stream':
        call_func = self._stream
      else:
        raise ValueError(f"Unknown method type {method_signature.method_type}")

      # method argnames, in order, collapsing nested keys to corresponding user function args
      method_argnames = []
      for var in method_signature.inputs:
        outer = var.name.split('.', 1)[0]
        if outer in method_argnames:
          continue
        method_argnames.append(outer)

      def bind_f(method_name, method_argnames, call_func):

        def f(*args, **kwargs):
          if len(args) > len(method_argnames):
            raise TypeError(
                f"{method_name}() takes {len(method_argnames)} positional arguments but {len(args)} were given"
            )
          for name, arg in zip(method_argnames, args):  # handle positional with zip shortest
            if name in kwargs:
              raise TypeError(f"Multiple values for argument {name}")
            kwargs[name] = arg
          return call_func(kwargs, method_name)

        return f

      # need to bind method_name to the value, not the mutating loop variable
      f = bind_f(method_name, method_argnames, call_func)

      # set names, annotations and docstrings
      f.__name__ = method_name
      f.__qualname__ = f'{self.__class__.__name__}.{method_name}'
      input_annos = {var.name: var.data_type for var in method_signature.inputs}
      output_annos = {var.name: var.data_type for var in method_signature.outputs}
      # unflatten nested keys to match the user function args for docs
      input_annos = unflatten_nested_keys(input_annos, method_signature.inputs, is_output=False)
      output_annos = unflatten_nested_keys(output_annos, method_signature.outputs, is_output=True)

      # add Stream[] to the stream input annotations for docs
      input_stream_argname, _ = get_stream_from_signature(method_signature.inputs)
      if input_stream_argname:
        input_annos[input_stream_argname] = 'Stream[' + str(
            input_annos[input_stream_argname]) + ']'

      # handle multiple outputs in the return annotation
      return_annotation = output_annos
      name = next(iter(output_annos.keys()))
      if len(output_annos) == 1 and name == 'return':
        # single output
        return_annotation = output_annos[name]
      elif name.startswith('return.') and name.split('.', 1)[1].isnumeric():
        # tuple output
        return_annotation = '(' + ", ".join(output_annos[f'return.{i}']
                                            for i in range(len(output_annos))) + ')'
      else:
        # named output
        return_annotation = f'Output({", ".join(f"{k}={t}" for k, t in output_annos.items())})'
      if method_signature.method_type in ['generate', 'stream']:
        return_annotation = f'Stream[{return_annotation}]'

      # set annotations and docstrings
      sig = inspect.signature(f).replace(
          parameters=[
              inspect.Parameter(k, inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=v)
              for k, v in input_annos.items()
          ],
          return_annotation=return_annotation,
      )
      f.__signature__ = sig
      f.__doc__ = method_signature.docstring
      setattr(self, method_name, f)

  def _predict(
      self,
      inputs,  # TODO set up functions according to fetched signatures?
      method_name: str = 'predict',
  ) -> Any:
    input_signature = self._method_signatures[method_name].inputs
    output_signature = self._method_signatures[method_name].outputs

    batch_input = True
    if isinstance(inputs, dict):
      inputs = [inputs]
      batch_input = False

    proto_inputs = []
    for input in inputs:
      proto = resources_pb2.Input()
      serialize(input, input_signature, proto.data)
      proto_inputs.append(proto)

    response = self._predict_by_proto(proto_inputs, method_name)
    #print(response)

    outputs = []
    for output in response.outputs:
      outputs.append(deserialize(output.data, output_signature, is_output=True))
    if batch_input:
      return outputs
    return outputs[0]

  def _predict_by_proto(
      self,
      inputs: List[resources_pb2.Input],
      method_name: str = None,
      inference_params: Dict = None,
      output_config: Dict = None,
  ) -> service_pb2.MultiOutputResponse:
    """Predicts the model based on the given inputs.

    Args:
      inputs (List[resources_pb2.Input]): The inputs to predict.
      method_name (str): The remote method name to call.
      inference_params (Dict): Inference parameters to override.
      output_config (Dict): Output configuration to override.

    Returns:
      service_pb2.MultiOutputResponse: The prediction response(s).
    """
    if not isinstance(inputs, list):
      raise UserError('Invalid inputs, inputs must be a list of Input objects.')
    if len(inputs) > MAX_MODEL_PREDICT_INPUTS:
      raise UserError(f"Too many inputs. Max is {MAX_MODEL_PREDICT_INPUTS}.")

    request = service_pb2.PostModelOutputsRequest()
    request.CopyFrom(self.request_template)

    request.inputs.extend(inputs)

    if method_name:
      # TODO put in new proto field?
      request.model.model_version.output_info.params['_method_name'] = method_name
    if inference_params:
      request.model.model_version.output_info.params.update(inference_params)
    if output_config:
      request.model.model_version.output_info.output_config.MergeFrom(
          resources_pb2.OutputConfig(**output_config))

    start_time = time.time()
    backoff_iterator = BackoffIterator(10)
    while True:
      response = self.STUB.PostModelOutputs(request)
      if status_is_retryable(
          response.status.code) and time.time() - start_time < 60 * 10:  # 10 minutes
        self.logger.info(f"Model predict failed with response {response.status!r}")
        time.sleep(next(backoff_iterator))
        continue

      if response.status.code != status_code_pb2.SUCCESS:
        raise Exception(f"Model predict failed with response {response.status!r}")
      break

    return response

  def _generate(
      self,
      inputs,  # TODO set up functions according to fetched signatures?
      method_name: str = 'generate',
  ) -> Any:
    input_signature = self._method_signatures[method_name].inputs
    output_signature = self._method_signatures[method_name].outputs

    batch_input = True
    if isinstance(inputs, dict):
      inputs = [inputs]
      batch_input = False

    proto_inputs = []
    for input in inputs:
      proto = resources_pb2.Input()
      serialize(input, input_signature, proto.data)
      proto_inputs.append(proto)

    response_stream = self._generate_by_proto(proto_inputs, method_name)
    #print(response)

    for response in response_stream:
      outputs = []
      for output in response.outputs:
        outputs.append(deserialize(output.data, output_signature, is_output=True))
      if batch_input:
        yield outputs
      else:
        yield outputs[0]

  def _generate_by_proto(
      self,
      inputs: List[resources_pb2.Input],
      method_name: str = None,
      inference_params: Dict = {},
      output_config: Dict = {},
  ):
    """Generate the stream output on model based on the given inputs.

    Args:
      inputs (list[Input]): The inputs to generate, must be less than 128.
      method_name (str): The remote method name to call.
      inference_params (dict): The inference params to override.
      output_config (dict): The output config to override.
    """
    if not isinstance(inputs, list):
      raise UserError('Invalid inputs, inputs must be a list of Input objects.')
    if len(inputs) > MAX_MODEL_PREDICT_INPUTS:
      raise UserError(f"Too many inputs. Max is {MAX_MODEL_PREDICT_INPUTS}."
                     )  # TODO Use Chunker for inputs len > 128

    request = service_pb2.PostModelOutputsRequest()
    request.CopyFrom(self.request_template)

    request.inputs.extend(inputs)

    if method_name:
      # TODO put in new proto field?
      request.model.model_version.output_info.params['_method_name'] = method_name
    if inference_params:
      request.model.model_version.output_info.params.update(inference_params)
    if output_config:
      request.model.model_version.output_info.output_config.MergeFrom(
          resources_pb2.OutputConfig(**output_config))

    start_time = time.time()
    backoff_iterator = BackoffIterator(10)
    started = False
    while not started:
      stream_response = self.STUB.GenerateModelOutputs(request)
      try:
        response = next(stream_response)  # get the first response
      except StopIteration:
        raise Exception("Model Generate failed with no response")
      if status_is_retryable(response.status.code) and \
          time.time() - start_time < 60 * 10:
        self.logger.info("Model is still deploying, please wait...")
        time.sleep(next(backoff_iterator))
        continue
      if response.status.code != status_code_pb2.SUCCESS:
        raise Exception(f"Model Generate failed with response {response.status!r}")
      started = True

    yield response  # yield the first response

    for response in stream_response:
      if response.status.code != status_code_pb2.SUCCESS:
        raise Exception(f"Model Generate failed with response {response.status!r}")
      yield response

  def _stream(
      self,
      inputs,
      method_name: str = 'stream',
  ) -> Any:
    input_signature = self._method_signatures[method_name].inputs
    output_signature = self._method_signatures[method_name].outputs

    if isinstance(inputs, list):
      assert len(inputs) == 1, 'streaming methods do not support batched calls'
      inputs = inputs[0]
    assert isinstance(inputs, dict)
    kwargs = inputs

    # find the streaming vars in the input signature, and the streaming input python param
    stream_argname, streaming_var_signatures = get_stream_from_signature(input_signature)

    # get the streaming input generator from the user-provided function arg values
    user_inputs_generator = kwargs.pop(stream_argname)

    def _input_proto_stream():
      # first item contains all the inputs and the first stream item
      proto = resources_pb2.Input()
      try:
        item = next(user_inputs_generator)
      except StopIteration:
        return  # no items to stream
      kwargs[stream_argname] = item
      serialize(kwargs, input_signature, proto.data)

      yield proto

      # subsequent items are just the stream items
      for item in user_inputs_generator:
        proto = resources_pb2.Input()
        serialize({stream_argname: item}, streaming_var_signatures, proto.data)
        yield proto

    response_stream = self._stream_by_proto(_input_proto_stream(), method_name)
    #print(response)

    for response in response_stream:
      assert len(response.outputs) == 1, 'streaming methods must have exactly one output'
      yield deserialize(response.outputs[0].data, output_signature, is_output=True)

  def _req_iterator(self,
                    input_iterator: Iterator[List[resources_pb2.Input]],
                    method_name: str = None,
                    inference_params: Dict = {},
                    output_config: Dict = {}):
    request = service_pb2.PostModelOutputsRequest()
    request.CopyFrom(self.request_template)
    request.model.model_version.output_info.params['_method_name'] = method_name
    if inference_params:
      request.model.model_version.output_info.params.update(inference_params)
    if output_config:
      request.model.model_version.output_info.output_config.MergeFrom(
          resources_pb2.OutputConfig(**output_config))
    for inputs in input_iterator:
      req = service_pb2.PostModelOutputsRequest()
      req.CopyFrom(request)
      if isinstance(inputs, list):
        req.inputs.extend(inputs)
      else:
        req.inputs.append(inputs)
      yield req

  def _stream_by_proto(self,
                       inputs: Iterator[List[resources_pb2.Input]],
                       method_name: str = None,
                       inference_params: Dict = {},
                       output_config: Dict = {}):
    """Generate the stream output on model based on the given stream of inputs.
    """
    # if not isinstance(inputs, Iterator[List[Input]]):
    #   raise UserError('Invalid inputs, inputs must be a iterator of list of Input objects.')

    request = self._req_iterator(inputs, method_name, inference_params, output_config)

    start_time = time.time()
    backoff_iterator = BackoffIterator(10)
    generation_started = False
    while True:
      if generation_started:
        break
      stream_response = self.STUB.StreamModelOutputs(request)
      for response in stream_response:
        if status_is_retryable(response.status.code) and \
            time.time() - start_time < 60 * 10:
          self.logger.info("Model is still deploying, please wait...")
          time.sleep(next(backoff_iterator))
          break
        if response.status.code != status_code_pb2.SUCCESS:
          raise Exception(f"Model Predict failed with response {response.status!r}")
        else:
          if not generation_started:
            generation_started = True

        yield response
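
Because _define_functions attaches one bound method per fetched signature, a deployed model ends up callable like an ordinary Python object. A minimal usage sketch follows, assuming a deployed model exposing a predict-type method named predict with a single text argument; the IDs, method name, and argument name are hypothetical, and the stub/template setup shown is one way to build them with the existing clarifai.client.auth helpers:

    # Sketch only: `predict`, `text`, and all IDs below are hypothetical; the real
    # method and argument names come from the signatures fetched via _GET_SIGNATURES.
    from clarifai.client.auth import create_stub
    from clarifai.client.auth.helper import ClarifaiAuthHelper
    from clarifai.client.model_client import ModelClient
    from clarifai_grpc.grpc.api import resources_pb2, service_pb2

    helper = ClarifaiAuthHelper.from_env()  # reads CLARIFAI_PAT etc. from the environment
    stub = create_stub(helper)

    request_template = service_pb2.PostModelOutputsRequest(
        user_app_id=resources_pb2.UserAppIDSet(user_id="my-user", app_id="my-app"),
        model_id="my-model",
    )

    client = ModelClient(stub, request_template=request_template)

    # A dict (or kwargs) means a single input, so a single output comes back;
    # a list of dicts would be treated as a batch and return a list.
    result = client.predict(text="Hello, world!")
    print(result)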
clarifai/runners/__init__.py (+2 -7)
@@ -1,14 +1,9 @@
-from .models.base_typed_model import AnyAnyModel, TextInputModel, VisualInputModel
 from .models.model_builder import ModelBuilder
+from .models.model_class import ModelClass
 from .models.model_runner import ModelRunner
-from .utils.data_handler import InputDataHandler, OutputDataHandler

 __all__ = [
     "ModelRunner",
     "ModelBuilder",
-    "InputDataHandler",
-    "OutputDataHandler",
-    "AnyAnyModel",
-    "TextInputModel",
-    "VisualInputModel",
+    "ModelClass",
 ]
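
This removes the typed base models and data handlers from the public clarifai.runners namespace in favor of the single ModelClass base (reworked in model_class.py, +269 lines above). A sketch of the import change for downstream code:

    # Import surface before (11.1.5); these names no longer exist in rc2:
    # from clarifai.runners import (AnyAnyModel, TextInputModel, VisualInputModel,
    #                               InputDataHandler, OutputDataHandler)

    # Import surface after (11.1.5rc2):
    from clarifai.runners import ModelBuilder, ModelClass, ModelRunner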
clarifai/runners/dockerfile_template/Dockerfile.debug (new file, +11 lines)
@@ -0,0 +1,11 @@
FROM --platform=$TARGETPLATFORM public.ecr.aws/docker/library/python:3.12-slim-bookworm as pybase

#############################
# Final runtime image
#############################
FROM --platform=$TARGETPLATFORM ${RUNTIME_IMAGE} as final

COPY --from=pybase --link=true /usr/bin/ls /usr/bin/cat /usr/bin/which /usr/bin/bash /usr/bin/sort /usr/bin/du /usr/bin/
COPY --from=pybase --link=true /bin/rbash /bin/sh /bin/rm /bin/
COPY --from=pybase --link=true /lib/*-linux-gnu/libselinux.so.1 /lib/*-linux-gnu/libpcre2-8.so.0 /lib/x86_64-linux-gnu/
COPY --from=pybase --link=true /lib/*-linux-gnu/libselinux.so.1 /lib/*-linux-gnu/libpcre2-8.so.0 /lib/aarch64-linux-gnu/
clarifai/runners/dockerfile_template/Dockerfile.debug~ (new file, +9 lines; editor backup copy of Dockerfile.debug)
@@ -0,0 +1,9 @@
FROM --platform=$TARGETPLATFORM public.ecr.aws/docker/library/python:3.12-slim-bookworm as pybase

#############################
# Final runtime image
#############################
FROM --platform=$TARGETPLATFORM ${RUNTIME_IMAGE} as final

COPY --from=pybase --link=true /usr/bin/ls /usr/bin/cat /usr/bin/which /usr/bin/bash /usr/bin/sort /usr/bin/du /usr/bin/
COPY --from=pybase --link=true /bin/rbash /bin/sh /bin/rm /bin/
clarifai/runners/dockerfile_template/Dockerfile.template (+3 -0)
@@ -44,6 +44,9 @@ ENV PYTHONPATH=${PYTHONPATH}:/home/nonroot/main \
     CLARIFAI_COMPUTE_CLUSTER_ID=${CLARIFAI_COMPUTE_CLUSTER_ID} \
     CLARIFAI_API_BASE=${CLARIFAI_API_BASE:-https://api.clarifai.com}

+# Write out the model function signatures
+RUN ["python", "-m", "clarifai.cli", "model", "signatures", "--model_path", "/home/nonroot/main", "--out_path", "/home/nonroot/main/signatures.yaml"]
+
 # Finally run the clarifai entrypoint to start the runner loop and local dev server.
 # Note(zeiler): we may want to make this a clarifai CLI call.
 ENTRYPOINT ["python", "-m", "clarifai.runners.server"]
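
The new RUN step bakes the model's method signatures into the image at build time, via the signatures subcommand added to clarifai/cli/model.py in this release. The same step can be exercised outside the image build; a minimal sketch, assuming the subcommand accepts the same flags as the RUN line above and a local model directory at ./my_model:

    # Hypothetical local run of the signature-export step; the subcommand and
    # flags are copied verbatim from the Dockerfile RUN line, only the paths differ.
    import subprocess

    subprocess.run(
        [
            "python", "-m", "clarifai.cli", "model", "signatures",
            "--model_path", "./my_model",
            "--out_path", "./my_model/signatures.yaml",
        ],
        check=True,
    )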