clarifai 11.1.5__py3-none-any.whl → 11.1.5rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (120)
  1. clarifai/__init__.py +1 -1
  2. clarifai/__pycache__/__init__.cpython-310.pyc +0 -0
  3. clarifai/__pycache__/errors.cpython-310.pyc +0 -0
  4. clarifai/__pycache__/versions.cpython-310.pyc +0 -0
  5. clarifai/cli/__main__.py~ +4 -0
  6. clarifai/cli/__pycache__/__init__.cpython-310.pyc +0 -0
  7. clarifai/cli/__pycache__/__main__.cpython-310.pyc +0 -0
  8. clarifai/cli/__pycache__/base.cpython-310.pyc +0 -0
  9. clarifai/cli/__pycache__/compute_cluster.cpython-310.pyc +0 -0
  10. clarifai/cli/__pycache__/deployment.cpython-310.pyc +0 -0
  11. clarifai/cli/__pycache__/model.cpython-310.pyc +0 -0
  12. clarifai/cli/__pycache__/nodepool.cpython-310.pyc +0 -0
  13. clarifai/cli/model.py +25 -0
  14. clarifai/client/__pycache__/__init__.cpython-310.pyc +0 -0
  15. clarifai/client/__pycache__/app.cpython-310.pyc +0 -0
  16. clarifai/client/__pycache__/base.cpython-310.pyc +0 -0
  17. clarifai/client/__pycache__/dataset.cpython-310.pyc +0 -0
  18. clarifai/client/__pycache__/input.cpython-310.pyc +0 -0
  19. clarifai/client/__pycache__/lister.cpython-310.pyc +0 -0
  20. clarifai/client/__pycache__/model.cpython-310.pyc +0 -0
  21. clarifai/client/__pycache__/module.cpython-310.pyc +0 -0
  22. clarifai/client/__pycache__/runner.cpython-310.pyc +0 -0
  23. clarifai/client/__pycache__/search.cpython-310.pyc +0 -0
  24. clarifai/client/__pycache__/user.cpython-310.pyc +0 -0
  25. clarifai/client/__pycache__/workflow.cpython-310.pyc +0 -0
  26. clarifai/client/auth/__pycache__/__init__.cpython-310.pyc +0 -0
  27. clarifai/client/auth/__pycache__/helper.cpython-310.pyc +0 -0
  28. clarifai/client/auth/__pycache__/register.cpython-310.pyc +0 -0
  29. clarifai/client/auth/__pycache__/stub.cpython-310.pyc +0 -0
  30. clarifai/client/model.py +90 -365
  31. clarifai/client/model_client.py +400 -0
  32. clarifai/constants/__pycache__/dataset.cpython-310.pyc +0 -0
  33. clarifai/constants/__pycache__/model.cpython-310.pyc +0 -0
  34. clarifai/constants/__pycache__/search.cpython-310.pyc +0 -0
  35. clarifai/datasets/__pycache__/__init__.cpython-310.pyc +0 -0
  36. clarifai/datasets/export/__pycache__/__init__.cpython-310.pyc +0 -0
  37. clarifai/datasets/export/__pycache__/inputs_annotations.cpython-310.pyc +0 -0
  38. clarifai/datasets/upload/__pycache__/__init__.cpython-310.pyc +0 -0
  39. clarifai/datasets/upload/__pycache__/base.cpython-310.pyc +0 -0
  40. clarifai/datasets/upload/__pycache__/features.cpython-310.pyc +0 -0
  41. clarifai/datasets/upload/__pycache__/image.cpython-310.pyc +0 -0
  42. clarifai/datasets/upload/__pycache__/text.cpython-310.pyc +0 -0
  43. clarifai/datasets/upload/__pycache__/utils.cpython-310.pyc +0 -0
  44. clarifai/datasets/upload/loaders/__pycache__/__init__.cpython-310.pyc +0 -0
  45. clarifai/datasets/upload/loaders/__pycache__/coco_detection.cpython-310.pyc +0 -0
  46. clarifai/models/__pycache__/__init__.cpython-310.pyc +0 -0
  47. clarifai/models/model_serving/__pycache__/__init__.cpython-310.pyc +0 -0
  48. clarifai/models/model_serving/__pycache__/constants.cpython-310.pyc +0 -0
  49. clarifai/models/model_serving/cli/__pycache__/__init__.cpython-310.pyc +0 -0
  50. clarifai/models/model_serving/cli/__pycache__/_utils.cpython-310.pyc +0 -0
  51. clarifai/models/model_serving/cli/__pycache__/base.cpython-310.pyc +0 -0
  52. clarifai/models/model_serving/cli/__pycache__/build.cpython-310.pyc +0 -0
  53. clarifai/models/model_serving/cli/__pycache__/create.cpython-310.pyc +0 -0
  54. clarifai/models/model_serving/model_config/__pycache__/__init__.cpython-310.pyc +0 -0
  55. clarifai/models/model_serving/model_config/__pycache__/base.cpython-310.pyc +0 -0
  56. clarifai/models/model_serving/model_config/__pycache__/config.cpython-310.pyc +0 -0
  57. clarifai/models/model_serving/model_config/__pycache__/inference_parameter.cpython-310.pyc +0 -0
  58. clarifai/models/model_serving/model_config/__pycache__/output.cpython-310.pyc +0 -0
  59. clarifai/models/model_serving/model_config/triton/__pycache__/__init__.cpython-310.pyc +0 -0
  60. clarifai/models/model_serving/model_config/triton/__pycache__/serializer.cpython-310.pyc +0 -0
  61. clarifai/models/model_serving/model_config/triton/__pycache__/triton_config.cpython-310.pyc +0 -0
  62. clarifai/models/model_serving/model_config/triton/__pycache__/wrappers.cpython-310.pyc +0 -0
  63. clarifai/models/model_serving/repo_build/__pycache__/__init__.cpython-310.pyc +0 -0
  64. clarifai/models/model_serving/repo_build/__pycache__/build.cpython-310.pyc +0 -0
  65. clarifai/models/model_serving/repo_build/static_files/__pycache__/base_test.cpython-310-pytest-7.2.0.pyc +0 -0
  66. clarifai/rag/__pycache__/__init__.cpython-310.pyc +0 -0
  67. clarifai/rag/__pycache__/rag.cpython-310.pyc +0 -0
  68. clarifai/rag/__pycache__/utils.cpython-310.pyc +0 -0
  69. clarifai/runners/__init__.py +2 -7
  70. clarifai/runners/__pycache__/__init__.cpython-310.pyc +0 -0
  71. clarifai/runners/__pycache__/server.cpython-310.pyc +0 -0
  72. clarifai/runners/dockerfile_template/Dockerfile.debug +11 -0
  73. clarifai/runners/dockerfile_template/Dockerfile.debug~ +9 -0
  74. clarifai/runners/dockerfile_template/Dockerfile.template +3 -0
  75. clarifai/runners/models/__pycache__/__init__.cpython-310.pyc +0 -0
  76. clarifai/runners/models/__pycache__/base_typed_model.cpython-310.pyc +0 -0
  77. clarifai/runners/models/__pycache__/model_builder.cpython-310.pyc +0 -0
  78. clarifai/runners/models/__pycache__/model_class.cpython-310.pyc +0 -0
  79. clarifai/runners/models/__pycache__/model_run_locally.cpython-310.pyc +0 -0
  80. clarifai/runners/models/__pycache__/model_runner.cpython-310.pyc +0 -0
  81. clarifai/runners/models/__pycache__/model_servicer.cpython-310.pyc +0 -0
  82. clarifai/runners/models/__pycache__/model_upload.cpython-310.pyc +0 -0
  83. clarifai/runners/models/model_builder.py +33 -7
  84. clarifai/runners/models/model_class.py +249 -25
  85. clarifai/runners/models/model_runner.py +2 -0
  86. clarifai/runners/models/model_servicer.py +11 -2
  87. clarifai/runners/server.py +5 -1
  88. clarifai/runners/utils/__pycache__/__init__.cpython-310.pyc +0 -0
  89. clarifai/runners/utils/__pycache__/const.cpython-310.pyc +0 -0
  90. clarifai/runners/utils/__pycache__/data_handler.cpython-310.pyc +0 -0
  91. clarifai/runners/utils/__pycache__/data_utils.cpython-310.pyc +0 -0
  92. clarifai/runners/utils/__pycache__/loader.cpython-310.pyc +0 -0
  93. clarifai/runners/utils/__pycache__/logging.cpython-310.pyc +0 -0
  94. clarifai/runners/utils/__pycache__/method_signatures.cpython-310.pyc +0 -0
  95. clarifai/runners/utils/__pycache__/serializers.cpython-310.pyc +0 -0
  96. clarifai/runners/utils/__pycache__/url_fetcher.cpython-310.pyc +0 -0
  97. clarifai/runners/utils/data_handler.py +308 -205
  98. clarifai/runners/utils/method_signatures.py +437 -0
  99. clarifai/runners/utils/serializers.py +132 -0
  100. clarifai/schema/__pycache__/search.cpython-310.pyc +0 -0
  101. clarifai/urls/__pycache__/helper.cpython-310.pyc +0 -0
  102. clarifai/utils/__pycache__/__init__.cpython-310.pyc +0 -0
  103. clarifai/utils/__pycache__/logging.cpython-310.pyc +0 -0
  104. clarifai/utils/__pycache__/misc.cpython-310.pyc +0 -0
  105. clarifai/utils/__pycache__/model_train.cpython-310.pyc +0 -0
  106. clarifai/utils/evaluation/__pycache__/__init__.cpython-310.pyc +0 -0
  107. clarifai/utils/evaluation/__pycache__/helpers.cpython-310.pyc +0 -0
  108. clarifai/utils/evaluation/__pycache__/main.cpython-310.pyc +0 -0
  109. clarifai/workflows/__pycache__/__init__.cpython-310.pyc +0 -0
  110. clarifai/workflows/__pycache__/export.cpython-310.pyc +0 -0
  111. clarifai/workflows/__pycache__/utils.cpython-310.pyc +0 -0
  112. clarifai/workflows/__pycache__/validate.cpython-310.pyc +0 -0
  113. {clarifai-11.1.5.dist-info → clarifai-11.1.5rc1.dist-info}/METADATA +16 -26
  114. clarifai-11.1.5rc1.dist-info/RECORD +201 -0
  115. {clarifai-11.1.5.dist-info → clarifai-11.1.5rc1.dist-info}/WHEEL +1 -1
  116. clarifai/runners/models/base_typed_model.py +0 -238
  117. clarifai-11.1.5.dist-info/RECORD +0 -101
  118. {clarifai-11.1.5.dist-info → clarifai-11.1.5rc1.dist-info}/LICENSE +0 -0
  119. {clarifai-11.1.5.dist-info → clarifai-11.1.5rc1.dist-info}/entry_points.txt +0 -0
  120. {clarifai-11.1.5.dist-info → clarifai-11.1.5rc1.dist-info}/top_level.txt +0 -0
clarifai/client/model_client.py (new file)
@@ -0,0 +1,400 @@
+ import time
+ from typing import Any, Dict, Iterator, List
+
+ from clarifai_grpc.grpc.api import resources_pb2, service_pb2
+ from clarifai_grpc.grpc.api.status import status_code_pb2
+
+ from clarifai.constants.model import MAX_MODEL_PREDICT_INPUTS
+ from clarifai.errors import UserError
+ from clarifai.runners.utils.method_signatures import deserialize, serialize, signatures_from_json
+ from clarifai.utils.misc import BackoffIterator, status_is_retryable
+
+
+ class ModelClient:
+   '''
+   Client for calling model predict, generate, and stream methods.
+   '''
+
+   def __init__(self, stub, request_template: service_pb2.PostModelOutputsRequest = None):
+     '''
+     Initialize the model client.
+
+     Args:
+       stub: The gRPC stub for the model.
+       request_template: The template for the request to send to the model, including
+         common fields like model_id, model_version, cluster, etc.
+     '''
+     self.STUB = stub
+     self.request_template = request_template or service_pb2.PostModelOutputsRequest()
+     self._fetch_signatures()
+     self._define_functions()
+
+   def _fetch_signatures(self):
+     '''
+     Fetch the method signatures from the model.
+
+     Returns:
+       Dict: The method signatures.
+     '''
+     #request = resources_pb2.GetModelSignaturesRequest()
+     #response = self.stub.GetModelSignatures(request)
+     #self._method_signatures = json.loads(response.signatures) # or define protos
+     # TODO this could use a new endpoint to get the signatures
+     # for local grpc models, we'll also have to add the endpoint to the model servicer
+     # for now we'll just use the predict endpoint with a special method name
+
+     request = service_pb2.PostModelOutputsRequest()
+     request.CopyFrom(self.request_template)
+     request.model.model_version.output_info.params['_method_name'] = '_GET_SIGNATURES'
+     request.inputs.add() # empty input for this method
+     start_time = time.time()
+     backoff_iterator = BackoffIterator(10)
+     while True:
+       response = self.STUB.PostModelOutputs(request)
+       if status_is_retryable(
+           response.status.code) and time.time() - start_time < 60 * 10: # 10 minutes
+         self.logger.info(f"Retrying model info fetch with response {response.status!r}")
+         time.sleep(next(backoff_iterator))
+         continue
+
+       if response.status.code != status_code_pb2.SUCCESS:
+         raise Exception(f"Model failed with response {response.status!r}")
+       break
+     if response.status.code != status_code_pb2.SUCCESS:
+       raise Exception(response.status)
+     self._method_signatures = signatures_from_json(response.outputs[0].data.string_value)
+
+   def _define_functions(self):
+     '''
+     Define the functions based on the method signatures.
+     '''
+     for method_name, method_signature in self._method_signatures.items():
+       # define the function in this client instance
+       if method_signature.method_type == 'predict':
+         call_func = self._predict
+       elif method_signature.method_type == 'generate':
+         call_func = self._generate
+       elif method_signature.method_type == 'stream':
+         call_func = self._stream
+       else:
+         raise ValueError(f"Unknown method type {method_signature.method_type}")
+
+       # method argnames, in order, collapsing nested keys to corresponding user function args
+       method_argnames = []
+       for var in method_signature.inputs:
+         outer = var.name.split('.', 1)[0]
+         if outer in method_argnames:
+           continue
+         method_argnames.append(outer)
+
+       def bind_f(method_name, method_argnames, call_func):
+
+         def f(*args, **kwargs):
+           if len(args) > len(method_argnames):
+             raise TypeError(
+                 f"{method_name}() takes {len(method_argnames)} positional arguments but {len(args)} were given"
+             )
+           for name, arg in zip(method_argnames, args): # handle positional with zip shortest
+             if name in kwargs:
+               raise TypeError(f"Multiple values for argument {name}")
+             kwargs[name] = arg
+           return call_func(kwargs, method_name)
+
+         return f
+
+       # need to bind method_name to the value, not the mutating loop variable
+       f = bind_f(method_name, method_argnames, call_func)
+
+       # set names and docstrings
+       # note we could also have used exec with strings from the signature to define the
+       # function, but this is safer (no xss), and docstrings with the signature is ok enough
+       f.__name__ = method_name
+       f.__qualname__ = f'{self.__class__.__name__}.{method_name}'
+       input_spec = ', '.join(
+           f'{var.name}: {var.data_type}{" = " + str(var.default) if not var.required else ""}'
+           for var in method_signature.inputs)
+       output_vars = method_signature.outputs
+       if len(output_vars) == 1 and output_vars[0].name == 'return':
+         # single output
+         output_spec = output_vars[0].data_type
+       elif output_vars[0].name == 'return.0':
+         # tuple output
+         output_spec = '(' + ', '.join(var.data_type for var in output_vars) + ')'
+       else:
+         # named output
+         output_spec = f'Output({", ".join(f"{var.name}={var.data_type}" for var in output_vars)})'
+       f.__doc__ = f'''{method_name}(self, {input_spec}) -> {output_spec}\n'''
+       #f.__doc__ += method_signature.description # TODO
+       setattr(self, method_name, f)
+
+   def _predict(
+       self,
+       inputs, # TODO set up functions according to fetched signatures?
+       method_name: str = 'predict',
+   ) -> Any:
+     input_signature = self._method_signatures[method_name].inputs
+     output_signature = self._method_signatures[method_name].outputs
+
+     batch_input = True
+     if isinstance(inputs, dict):
+       inputs = [inputs]
+       batch_input = False
+
+     proto_inputs = []
+     for input in inputs:
+       proto = resources_pb2.Input()
+       serialize(input, input_signature, proto.data)
+       proto_inputs.append(proto)
+
+     response = self._predict_by_proto(proto_inputs, method_name)
+     #print(response)
+
+     outputs = []
+     for output in response.outputs:
+       outputs.append(deserialize(output.data, output_signature, is_output=True))
+     if batch_input:
+       return outputs
+     return outputs[0]
+
+   def _predict_by_proto(
+       self,
+       inputs: List[resources_pb2.Input],
+       method_name: str = None,
+       inference_params: Dict = None,
+       output_config: Dict = None,
+   ) -> service_pb2.MultiOutputResponse:
+     """Predicts the model based on the given inputs.
+
+     Args:
+       inputs (List[resources_pb2.Input]): The inputs to predict.
+       method_name (str): The remote method name to call.
+       inference_params (Dict): Inference parameters to override.
+       output_config (Dict): Output configuration to override.
+
+     Returns:
+       service_pb2.MultiOutputResponse: The prediction response(s).
+     """
+     if not isinstance(inputs, list):
+       raise UserError('Invalid inputs, inputs must be a list of Input objects.')
+     if len(inputs) > MAX_MODEL_PREDICT_INPUTS:
+       raise UserError(f"Too many inputs. Max is {MAX_MODEL_PREDICT_INPUTS}.")
+
+     request = service_pb2.PostModelOutputsRequest()
+     request.CopyFrom(self.request_template)
+
+     request.inputs.extend(inputs)
+
+     if method_name:
+       # TODO put in new proto field?
+       request.model.model_version.output_info.params['_method_name'] = method_name
+     if inference_params:
+       request.model.model_version.output_info.params.update(inference_params)
+     if output_config:
+       request.model.model_version.output_info.output_config.MergeFromDict(output_config)
+
+     start_time = time.time()
+     backoff_iterator = BackoffIterator(10)
+     while True:
+       response = self.STUB.PostModelOutputs(request)
+       if status_is_retryable(
+           response.status.code) and time.time() - start_time < 60 * 10: # 10 minutes
+         self.logger.info(f"Model predict failed with response {response.status!r}")
+         time.sleep(next(backoff_iterator))
+         continue
+
+       if response.status.code != status_code_pb2.SUCCESS:
+         raise Exception(f"Model predict failed with response {response.status!r}")
+       break
+
+     return response
+
+   def _generate(
+       self,
+       inputs, # TODO set up functions according to fetched signatures?
+       method_name: str = 'generate',
+   ) -> Any:
+     input_signature = self._method_signatures[method_name].inputs
+     output_signature = self._method_signatures[method_name].outputs
+
+     batch_input = True
+     if isinstance(inputs, dict):
+       inputs = [inputs]
+       batch_input = False
+
+     proto_inputs = []
+     for input in inputs:
+       proto = resources_pb2.Input()
+       serialize(input, input_signature, proto.data)
+       proto_inputs.append(proto)
+
+     response_stream = self._generate_by_proto(proto_inputs, method_name)
+     #print(response)
+
+     for response in response_stream:
+       outputs = []
+       for output in response.outputs:
+         outputs.append(deserialize(output.data, output_signature, is_output=True))
+       if batch_input:
+         yield outputs
+       yield outputs[0]
+
+   def _generate_by_proto(
+       self,
+       inputs: List[resources_pb2.Input],
+       method_name: str = None,
+       inference_params: Dict = {},
+       output_config: Dict = {},
+   ):
+     """Generate the stream output on model based on the given inputs.
+
+     Args:
+       inputs (list[Input]): The inputs to generate, must be less than 128.
+       method_name (str): The remote method name to call.
+       inference_params (dict): The inference params to override.
+       output_config (dict): The output config to override.
+     """
+     if not isinstance(inputs, list):
+       raise UserError('Invalid inputs, inputs must be a list of Input objects.')
+     if len(inputs) > MAX_MODEL_PREDICT_INPUTS:
+       raise UserError(f"Too many inputs. Max is {MAX_MODEL_PREDICT_INPUTS}."
+                      ) # TODO Use Chunker for inputs len > 128
+
+     request = service_pb2.PostModelOutputsRequest()
+     request.CopyFrom(self.request_template)
+
+     request.inputs.extend(inputs)
+
+     if method_name:
+       # TODO put in new proto field?
+       request.model.model_version.output_info.params['_method_name'] = method_name
+     if inference_params:
+       request.model.model_version.output_info.params.update(inference_params)
+     if output_config:
+       request.model.model_version.output_info.output_config.MergeFromDict(output_config)
+
+     start_time = time.time()
+     backoff_iterator = BackoffIterator(10)
+     started = False
+     while not started:
+       stream_response = self.STUB.GenerateModelOutputs(request)
+       try:
+         response = next(stream_response) # get the first response
+       except StopIteration:
+         raise Exception("Model Generate failed with no response")
+       if status_is_retryable(response.status.code) and \
+           time.time() - start_time < 60 * 10:
+         self.logger.info("Model is still deploying, please wait...")
+         time.sleep(next(backoff_iterator))
+         continue
+       if response.status.code != status_code_pb2.SUCCESS:
+         raise Exception(f"Model Generate failed with response {response.status!r}")
+       started = True
+
+     yield response # yield the first response
+
+     for response in stream_response:
+       if response.status.code != status_code_pb2.SUCCESS:
+         raise Exception(f"Model Generate failed with response {response.status!r}")
+       yield response
+
+   def _stream(
+       self,
+       inputs,
+       method_name: str = 'stream',
+   ) -> Any:
+     input_signature = self._method_signatures[method_name].inputs
+     output_signature = self._method_signatures[method_name].outputs
+
+     if isinstance(inputs, list):
+       assert len(inputs) == 1, 'streaming methods do not support batched calls'
+       inputs = inputs[0]
+     assert isinstance(inputs, dict)
+     kwargs = inputs
+
+     # find the streaming vars in the input signature, and the streaming input python param
+     streaming_var_signatures = [var for var in input_signature if var.streaming]
+     stream_argname = set([var.name.split('.', 1)[0] for var in streaming_var_signatures])
+     assert len(
+         stream_argname) == 1, 'streaming methods must have exactly one streaming function arg'
+     stream_argname = stream_argname.pop()
+
+     # get the streaming input generator from the user-provided function arg values
+     user_inputs_generator = kwargs.pop(stream_argname)
+
+     def _input_proto_stream():
+       # first item contains all the inputs and the first stream item
+       proto = resources_pb2.Input()
+       try:
+         item = next(user_inputs_generator)
+       except StopIteration:
+         return # no items to stream
+       kwargs[stream_argname] = item
+       serialize(kwargs, input_signature, proto.data)
+
+       yield proto
+
+       # subsequent items are just the stream items
+       for item in user_inputs_generator:
+         proto = resources_pb2.Input()
+         serialize({stream_argname: item}, streaming_var_signatures, proto.data)
+         yield proto
+
+     response_stream = self._stream_by_proto(_input_proto_stream(), method_name)
+     #print(response)
+
+     for response in response_stream:
+       assert len(response.outputs) == 1, 'streaming methods must have exactly one output'
+       yield deserialize(response.outputs[0].data, output_signature, is_output=True)
+
+   def _req_iterator(self,
+                     input_iterator: Iterator[List[resources_pb2.Input]],
+                     method_name: str = None,
+                     inference_params: Dict = {},
+                     output_config: Dict = {}):
+     request = service_pb2.PostModelOutputsRequest()
+     request.CopyFrom(self.request_template)
+     request.model.model_version.output_info.params['_method_name'] = method_name
+     if inference_params:
+       request.model.model_version.output_info.params.update(inference_params)
+     if output_config:
+       request.model.model_version.output_info.output_config.MergeFromDict(output_config)
+     for inputs in input_iterator:
+       req = service_pb2.PostModelOutputsRequest()
+       req.CopyFrom(request)
+       if isinstance(inputs, list):
+         req.inputs.extend(inputs)
+       else:
+         req.inputs.append(inputs)
+       yield req
+
+   def _stream_by_proto(self,
+                        inputs: Iterator[List[resources_pb2.Input]],
+                        method_name: str = None,
+                        inference_params: Dict = {},
+                        output_config: Dict = {}):
+     """Generate the stream output on model based on the given stream of inputs.
+     """
+     # if not isinstance(inputs, Iterator[List[Input]]):
+     #   raise UserError('Invalid inputs, inputs must be a iterator of list of Input objects.')
+
+     request = self._req_iterator(inputs, method_name, inference_params, output_config)
+
+     start_time = time.time()
+     backoff_iterator = BackoffIterator(10)
+     generation_started = False
+     while True:
+       if generation_started:
+         break
+       stream_response = self.STUB.StreamModelOutputs(request)
+       for response in stream_response:
+         if status_is_retryable(response.status.code) and \
+             time.time() - start_time < 60 * 10:
+           self.logger.info("Model is still deploying, please wait...")
+           time.sleep(next(backoff_iterator))
+           break
+         if response.status.code != status_code_pb2.SUCCESS:
+           raise Exception(f"Model Predict failed with response {response.status!r}")
+         else:
+           if not generation_started:
+             generation_started = True
+           yield response
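
The new ModelClient discovers a model's methods at runtime (via the `_GET_SIGNATURES` predict call above) and attaches a matching Python function for each one with `setattr`. A minimal usage sketch, assuming an already-authenticated gRPC stub named `stub` and a model whose signatures declare a `predict(prompt)` method (both hypothetical here, not part of the diff):

from clarifai_grpc.grpc.api import service_pb2

from clarifai.client.model_client import ModelClient

# The request template would normally be prefilled with user_app_id, model_id,
# model version, compute cluster, etc. before being handed to the client.
template = service_pb2.PostModelOutputsRequest()
client = ModelClient(stub, request_template=template)  # fetches signatures, binds methods

# The client now has a real 'predict' attribute; kwargs are serialized into
# an Input proto and sent through PostModelOutputs with retry/backoff.
result = client.predict(prompt="Hello")
help(client.predict)  # the generated docstring shows the remote signature
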
clarifai/runners/__init__.py
@@ -1,14 +1,9 @@
- from .models.base_typed_model import AnyAnyModel, TextInputModel, VisualInputModel
  from .models.model_builder import ModelBuilder
+ from .models.model_class import ModelClass
  from .models.model_runner import ModelRunner
- from .utils.data_handler import InputDataHandler, OutputDataHandler

  __all__ = [
      "ModelRunner",
      "ModelBuilder",
-     "InputDataHandler",
-     "OutputDataHandler",
-     "AnyAnyModel",
-     "TextInputModel",
-     "VisualInputModel",
+     "ModelClass",
  ]
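
The typed base models (`AnyAnyModel`, `TextInputModel`, `VisualInputModel`) and the data handlers are gone from the package root; `ModelClass` is now the single exported base. A hedged migration sketch (the exact method-registration details live in the reworked model_class.py, which is not shown in full in this diff):

# 11.1.5:
#   from clarifai.runners import AnyAnyModel
#   class MyModel(AnyAnyModel): ...
#
# 11.1.5rc1 -- subclass the one remaining base class instead:
from clarifai.runners import ModelClass

class MyModel(ModelClass):

  def load_model(self):
    # Load weights once at startup; ModelBuilder calls this when load_model=True.
    pass
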
clarifai/runners/dockerfile_template/Dockerfile.debug (new file)
@@ -0,0 +1,11 @@
+ FROM --platform=$TARGETPLATFORM public.ecr.aws/docker/library/python:3.12-slim-bookworm as pybase
+
+ #############################
+ # Final runtime image
+ #############################
+ FROM --platform=$TARGETPLATFORM ${RUNTIME_IMAGE} as final
+
+ COPY --from=pybase --link=true /usr/bin/ls /usr/bin/cat /usr/bin/which /usr/bin/bash /usr/bin/sort /usr/bin/du /usr/bin/
+ COPY --from=pybase --link=true /bin/rbash /bin/sh /bin/rm /bin/
+ COPY --from=pybase --link=true /lib/*-linux-gnu/libselinux.so.1 /lib/*-linux-gnu/libpcre2-8.so.0 /lib/x86_64-linux-gnu/
+ COPY --from=pybase --link=true /lib/*-linux-gnu/libselinux.so.1 /lib/*-linux-gnu/libpcre2-8.so.0 /lib/aarch64-linux-gnu/
clarifai/runners/dockerfile_template/Dockerfile.debug~ (new file)
@@ -0,0 +1,9 @@
+ FROM --platform=$TARGETPLATFORM public.ecr.aws/docker/library/python:3.12-slim-bookworm as pybase
+
+ #############################
+ # Final runtime image
+ #############################
+ FROM --platform=$TARGETPLATFORM ${RUNTIME_IMAGE} as final
+
+ COPY --from=pybase --link=true /usr/bin/ls /usr/bin/cat /usr/bin/which /usr/bin/bash /usr/bin/sort /usr/bin/du /usr/bin/
+ COPY --from=pybase --link=true /bin/rbash /bin/sh /bin/rm /bin/
clarifai/runners/dockerfile_template/Dockerfile.template
@@ -44,6 +44,9 @@ ENV PYTHONPATH=${PYTHONPATH}:/home/nonroot/main \
      CLARIFAI_COMPUTE_CLUSTER_ID=${CLARIFAI_COMPUTE_CLUSTER_ID} \
      CLARIFAI_API_BASE=${CLARIFAI_API_BASE:-https://api.clarifai.com}

+ # Write out the model function signatures
+ RUN ["python", "-m", "clarifai.cli", "model", "signatures", "--model_path", "/home/nonroot/main", "--out_path", "/home/nonroot/main/signatures.yaml"]
+
  # Finally run the clarifai entrypoint to start the runner loop and local dev server.
  # Note(zeiler): we may want to make this a clarifai CLI call.
  ENTRYPOINT ["python", "-m", "clarifai.runners.server"]
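
The new RUN step bakes signatures.yaml into the image at build time, so the signatures can be served without importing the model at request time. The same CLI entry point can be exercised outside Docker; a sketch with illustrative local paths (the container uses /home/nonroot/main):

import subprocess

# Flags mirror the Dockerfile RUN step above; only the paths are changed for local use.
subprocess.run([
    "python", "-m", "clarifai.cli", "model", "signatures",
    "--model_path", "./my_model",
    "--out_path", "./signatures.yaml",
], check=True)
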
clarifai/runners/models/model_builder.py
@@ -14,13 +14,14 @@ from google.protobuf import json_format
  from rich import print
  from rich.markup import escape

- from clarifai.client import BaseClient
+ from clarifai.client.base import BaseClient
  from clarifai.runners.models.model_class import ModelClass
  from clarifai.runners.utils.const import (
      AVAILABLE_PYTHON_IMAGES, AVAILABLE_TORCH_IMAGES, CONCEPTS_REQUIRED_MODEL_TYPE,
      DEFAULT_DOWNLOAD_CHECKPOINT_WHEN, DEFAULT_PYTHON_VERSION, DEFAULT_RUNTIME_DOWNLOAD_PATH,
      PYTHON_BASE_IMAGE, TORCH_BASE_IMAGE)
  from clarifai.runners.utils.loader import HuggingFaceLoader
+ from clarifai.runners.utils.method_signatures import signatures_to_yaml
  from clarifai.urls.helper import ClarifaiUrlHelper
  from clarifai.utils.logging import logger
  from clarifai.versions import CLIENT_VERSION
@@ -69,6 +70,18 @@ class ModelBuilder:
      """
      Create an instance of the model class, as specified in the config file.
      """
+     model_class = self.load_model_class()
+
+     # initialize the model
+     model = model_class()
+     if load_model:
+       model.load_model()
+     return model
+
+   def load_model_class(self):
+     """
+     Import the model class from the model.py file.
+     """
      # look for default model.py file location
      for loc in ["model.py", "1/model.py"]:
        model_file = os.path.join(self.folder, loc)
@@ -107,12 +120,7 @@
            "Could not determine model class. There should be exactly one model inheriting from ModelClass defined in the model.py"
        )
      model_class = classes[0]
-
-     # initialize the model
-     model = model_class()
-     if load_model:
-       model.load_model()
-     return model
+     return model_class

    def _validate_folder(self, folder):
      if folder == ".":
@@ -226,6 +234,15 @@
        )
        logger.info("Continuing without Hugging Face token")

+     num_threads = self.config.get("num_threads")
+     if num_threads or num_threads == 0:
+       assert isinstance(num_threads, int) and num_threads >= 1, ValueError(
+           f"`num_threads` must be an integer greater than or equal to 1. Received type {type(num_threads)} with value {num_threads}."
+       )
+     else:
+       num_threads = int(os.environ.get("CLARIFAI_NUM_THREADS", 1))
+     self.config["num_threads"] = num_threads
+
    @staticmethod
    def _get_tar_file_content_size(tar_file_path):
      """
@@ -244,6 +261,15 @@
          total_size += member.size
      return total_size

+   def method_signatures_yaml(self):
+     """
+     Returns the method signatures for the model class in YAML format.
+     """
+     model_class = self.load_model_class()
+     method_info = model_class._get_method_info()
+     signatures = {name: m.signature for name, m in method_info.items()}
+     return signatures_to_yaml(signatures)
+
    @property
    def client(self):
      if self._client is None:
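
`method_signatures_yaml` makes the signature export available programmatically as well as through the CLI. A sketch, assuming `ModelBuilder` takes the model directory as its first argument and that the (hypothetical) `./my_model` directory contains a model.py defining a ModelClass subclass:

from clarifai.runners.models.model_builder import ModelBuilder

builder = ModelBuilder("./my_model")      # hypothetical local model directory
print(builder.method_signatures_yaml())  # YAML mapping of method name -> signature
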