clarifai 11.1.4rc2__py3-none-any.whl → 11.1.5rc1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clarifai/__init__.py +1 -1
- clarifai/cli/__pycache__/model.cpython-310.pyc +0 -0
- clarifai/cli/model.py +46 -10
- clarifai/client/model.py +89 -364
- clarifai/client/model_client.py +400 -0
- clarifai/client/workflow.py +2 -2
- clarifai/datasets/upload/loaders/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/datasets/upload/loaders/__pycache__/coco_detection.cpython-310.pyc +0 -0
- clarifai/rag/__pycache__/rag.cpython-310.pyc +0 -0
- clarifai/runners/__init__.py +2 -7
- clarifai/runners/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/runners/__pycache__/server.cpython-310.pyc +0 -0
- clarifai/runners/dockerfile_template/Dockerfile.template +4 -32
- clarifai/runners/models/__pycache__/base_typed_model.cpython-310.pyc +0 -0
- clarifai/runners/models/__pycache__/model_builder.cpython-310.pyc +0 -0
- clarifai/runners/models/__pycache__/model_class.cpython-310.pyc +0 -0
- clarifai/runners/models/__pycache__/model_run_locally.cpython-310.pyc +0 -0
- clarifai/runners/models/__pycache__/model_runner.cpython-310.pyc +0 -0
- clarifai/runners/models/__pycache__/model_servicer.cpython-310.pyc +0 -0
- clarifai/runners/models/model_builder.py +47 -20
- clarifai/runners/models/model_class.py +249 -25
- clarifai/runners/models/model_run_locally.py +5 -2
- clarifai/runners/models/model_runner.py +2 -0
- clarifai/runners/models/model_servicer.py +11 -2
- clarifai/runners/server.py +26 -9
- clarifai/runners/utils/__pycache__/const.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/data_handler.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/method_signatures.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/serializers.cpython-310.pyc +0 -0
- clarifai/runners/utils/const.py +1 -1
- clarifai/runners/utils/data_handler.py +308 -205
- clarifai/runners/utils/method_signatures.py +437 -0
- clarifai/runners/utils/serializers.py +132 -0
- clarifai/utils/evaluation/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/utils/evaluation/__pycache__/helpers.cpython-310.pyc +0 -0
- clarifai/utils/evaluation/__pycache__/main.cpython-310.pyc +0 -0
- clarifai/utils/misc.py +12 -0
- {clarifai-11.1.4rc2.dist-info → clarifai-11.1.5rc1.dist-info}/METADATA +3 -2
- {clarifai-11.1.4rc2.dist-info → clarifai-11.1.5rc1.dist-info}/RECORD +43 -36
- clarifai/runners/models/base_typed_model.py +0 -238
- clarifai/runners/models/model_upload.py +0 -607
- clarifai/runners/utils/#const.py# +0 -30
- {clarifai-11.1.4rc2.dist-info → clarifai-11.1.5rc1.dist-info}/LICENSE +0 -0
- {clarifai-11.1.4rc2.dist-info → clarifai-11.1.5rc1.dist-info}/WHEEL +0 -0
- {clarifai-11.1.4rc2.dist-info → clarifai-11.1.5rc1.dist-info}/entry_points.txt +0 -0
- {clarifai-11.1.4rc2.dist-info → clarifai-11.1.5rc1.dist-info}/top_level.txt +0 -0
clarifai/client/model_client.py
ADDED
@@ -0,0 +1,400 @@
+import time
+from typing import Any, Dict, Iterator, List
+
+from clarifai_grpc.grpc.api import resources_pb2, service_pb2
+from clarifai_grpc.grpc.api.status import status_code_pb2
+
+from clarifai.constants.model import MAX_MODEL_PREDICT_INPUTS
+from clarifai.errors import UserError
+from clarifai.runners.utils.method_signatures import deserialize, serialize, signatures_from_json
+from clarifai.utils.misc import BackoffIterator, status_is_retryable
+
+
+class ModelClient:
+  '''
+  Client for calling model predict, generate, and stream methods.
+  '''
+
+  def __init__(self, stub, request_template: service_pb2.PostModelOutputsRequest = None):
+    '''
+    Initialize the model client.
+
+    Args:
+        stub: The gRPC stub for the model.
+        request_template: The template for the request to send to the model, including
+            common fields like model_id, model_version, cluster, etc.
+    '''
+    self.STUB = stub
+    self.request_template = request_template or service_pb2.PostModelOutputsRequest()
+    self._fetch_signatures()
+    self._define_functions()
+
+  def _fetch_signatures(self):
+    '''
+    Fetch the method signatures from the model.
+
+    Returns:
+        Dict: The method signatures.
+    '''
+    #request = resources_pb2.GetModelSignaturesRequest()
+    #response = self.stub.GetModelSignatures(request)
+    #self._method_signatures = json.loads(response.signatures)  # or define protos
+    # TODO this could use a new endpoint to get the signatures
+    # for local grpc models, we'll also have to add the endpoint to the model servicer
+    # for now we'll just use the predict endpoint with a special method name
+
+    request = service_pb2.PostModelOutputsRequest()
+    request.CopyFrom(self.request_template)
+    request.model.model_version.output_info.params['_method_name'] = '_GET_SIGNATURES'
+    request.inputs.add()  # empty input for this method
+    start_time = time.time()
+    backoff_iterator = BackoffIterator(10)
+    while True:
+      response = self.STUB.PostModelOutputs(request)
+      if status_is_retryable(
+          response.status.code) and time.time() - start_time < 60 * 10:  # 10 minutes
+        self.logger.info(f"Retrying model info fetch with response {response.status!r}")
+        time.sleep(next(backoff_iterator))
+        continue
+
+      if response.status.code != status_code_pb2.SUCCESS:
+        raise Exception(f"Model failed with response {response.status!r}")
+      break
+    if response.status.code != status_code_pb2.SUCCESS:
+      raise Exception(response.status)
+    self._method_signatures = signatures_from_json(response.outputs[0].data.string_value)
+
+  def _define_functions(self):
+    '''
+    Define the functions based on the method signatures.
+    '''
+    for method_name, method_signature in self._method_signatures.items():
+      # define the function in this client instance
+      if method_signature.method_type == 'predict':
+        call_func = self._predict
+      elif method_signature.method_type == 'generate':
+        call_func = self._generate
+      elif method_signature.method_type == 'stream':
+        call_func = self._stream
+      else:
+        raise ValueError(f"Unknown method type {method_signature.method_type}")
+
+      # method argnames, in order, collapsing nested keys to corresponding user function args
+      method_argnames = []
+      for var in method_signature.inputs:
+        outer = var.name.split('.', 1)[0]
+        if outer in method_argnames:
+          continue
+        method_argnames.append(outer)
+
+      def bind_f(method_name, method_argnames, call_func):
+
+        def f(*args, **kwargs):
+          if len(args) > len(method_argnames):
+            raise TypeError(
+                f"{method_name}() takes {len(method_argnames)} positional arguments but {len(args)} were given"
+            )
+          for name, arg in zip(method_argnames, args):  # handle positional with zip shortest
+            if name in kwargs:
+              raise TypeError(f"Multiple values for argument {name}")
+            kwargs[name] = arg
+          return call_func(kwargs, method_name)
+
+        return f
+
+      # need to bind method_name to the value, not the mutating loop variable
+      f = bind_f(method_name, method_argnames, call_func)
+
+      # set names and docstrings
+      # note we could also have used exec with strings from the signature to define the
+      # function, but this is safer (no xss), and docstrings with the signature is ok enough
+      f.__name__ = method_name
+      f.__qualname__ = f'{self.__class__.__name__}.{method_name}'
+      input_spec = ', '.join(
+          f'{var.name}: {var.data_type}{" = " + str(var.default) if not var.required else ""}'
+          for var in method_signature.inputs)
+      output_vars = method_signature.outputs
+      if len(output_vars) == 1 and output_vars[0].name == 'return':
+        # single output
+        output_spec = output_vars[0].data_type
+      elif output_vars[0].name == 'return.0':
+        # tuple output
+        output_spec = '(' + ', '.join(var.data_type for var in output_vars) + ')'
+      else:
+        # named output
+        output_spec = f'Output({", ".join(f"{var.name}={var.data_type}" for var in output_vars)})'
+      f.__doc__ = f'''{method_name}(self, {input_spec}) -> {output_spec}\n'''
+      #f.__doc__ += method_signature.description  # TODO
+      setattr(self, method_name, f)
+
+  def _predict(
+      self,
+      inputs,  # TODO set up functions according to fetched signatures?
+      method_name: str = 'predict',
+  ) -> Any:
+    input_signature = self._method_signatures[method_name].inputs
+    output_signature = self._method_signatures[method_name].outputs
+
+    batch_input = True
+    if isinstance(inputs, dict):
+      inputs = [inputs]
+      batch_input = False
+
+    proto_inputs = []
+    for input in inputs:
+      proto = resources_pb2.Input()
+      serialize(input, input_signature, proto.data)
+      proto_inputs.append(proto)
+
+    response = self._predict_by_proto(proto_inputs, method_name)
+    #print(response)
+
+    outputs = []
+    for output in response.outputs:
+      outputs.append(deserialize(output.data, output_signature, is_output=True))
+    if batch_input:
+      return outputs
+    return outputs[0]
+
+  def _predict_by_proto(
+      self,
+      inputs: List[resources_pb2.Input],
+      method_name: str = None,
+      inference_params: Dict = None,
+      output_config: Dict = None,
+  ) -> service_pb2.MultiOutputResponse:
+    """Predicts the model based on the given inputs.
+
+    Args:
+        inputs (List[resources_pb2.Input]): The inputs to predict.
+        method_name (str): The remote method name to call.
+        inference_params (Dict): Inference parameters to override.
+        output_config (Dict): Output configuration to override.
+
+    Returns:
+        service_pb2.MultiOutputResponse: The prediction response(s).
+    """
+    if not isinstance(inputs, list):
+      raise UserError('Invalid inputs, inputs must be a list of Input objects.')
+    if len(inputs) > MAX_MODEL_PREDICT_INPUTS:
+      raise UserError(f"Too many inputs. Max is {MAX_MODEL_PREDICT_INPUTS}.")
+
+    request = service_pb2.PostModelOutputsRequest()
+    request.CopyFrom(self.request_template)
+
+    request.inputs.extend(inputs)
+
+    if method_name:
+      # TODO put in new proto field?
+      request.model.model_version.output_info.params['_method_name'] = method_name
+    if inference_params:
+      request.model.model_version.output_info.params.update(inference_params)
+    if output_config:
+      request.model.model_version.output_info.output_config.MergeFromDict(output_config)
+
+    start_time = time.time()
+    backoff_iterator = BackoffIterator(10)
+    while True:
+      response = self.STUB.PostModelOutputs(request)
+      if status_is_retryable(
+          response.status.code) and time.time() - start_time < 60 * 10:  # 10 minutes
+        self.logger.info(f"Model predict failed with response {response.status!r}")
+        time.sleep(next(backoff_iterator))
+        continue
+
+      if response.status.code != status_code_pb2.SUCCESS:
+        raise Exception(f"Model predict failed with response {response.status!r}")
+      break
+
+    return response
+
+  def _generate(
+      self,
+      inputs,  # TODO set up functions according to fetched signatures?
+      method_name: str = 'generate',
+  ) -> Any:
+    input_signature = self._method_signatures[method_name].inputs
+    output_signature = self._method_signatures[method_name].outputs
+
+    batch_input = True
+    if isinstance(inputs, dict):
+      inputs = [inputs]
+      batch_input = False
+
+    proto_inputs = []
+    for input in inputs:
+      proto = resources_pb2.Input()
+      serialize(input, input_signature, proto.data)
+      proto_inputs.append(proto)
+
+    response_stream = self._generate_by_proto(proto_inputs, method_name)
+    #print(response)
+
+    for response in response_stream:
+      outputs = []
+      for output in response.outputs:
+        outputs.append(deserialize(output.data, output_signature, is_output=True))
+      if batch_input:
+        yield outputs
+      yield outputs[0]
+
+  def _generate_by_proto(
+      self,
+      inputs: List[resources_pb2.Input],
+      method_name: str = None,
+      inference_params: Dict = {},
+      output_config: Dict = {},
+  ):
+    """Generate the stream output on model based on the given inputs.
+
+    Args:
+        inputs (list[Input]): The inputs to generate, must be less than 128.
+        method_name (str): The remote method name to call.
+        inference_params (dict): The inference params to override.
+        output_config (dict): The output config to override.
+    """
+    if not isinstance(inputs, list):
+      raise UserError('Invalid inputs, inputs must be a list of Input objects.')
+    if len(inputs) > MAX_MODEL_PREDICT_INPUTS:
+      raise UserError(f"Too many inputs. Max is {MAX_MODEL_PREDICT_INPUTS}."
+                     )  # TODO Use Chunker for inputs len > 128
+
+    request = service_pb2.PostModelOutputsRequest()
+    request.CopyFrom(self.request_template)
+
+    request.inputs.extend(inputs)
+
+    if method_name:
+      # TODO put in new proto field?
+      request.model.model_version.output_info.params['_method_name'] = method_name
+    if inference_params:
+      request.model.model_version.output_info.params.update(inference_params)
+    if output_config:
+      request.model.model_version.output_info.output_config.MergeFromDict(output_config)
+
+    start_time = time.time()
+    backoff_iterator = BackoffIterator(10)
+    started = False
+    while not started:
+      stream_response = self.STUB.GenerateModelOutputs(request)
+      try:
+        response = next(stream_response)  # get the first response
+      except StopIteration:
+        raise Exception("Model Generate failed with no response")
+      if status_is_retryable(response.status.code) and \
+              time.time() - start_time < 60 * 10:
+        self.logger.info("Model is still deploying, please wait...")
+        time.sleep(next(backoff_iterator))
+        continue
+      if response.status.code != status_code_pb2.SUCCESS:
+        raise Exception(f"Model Generate failed with response {response.status!r}")
+      started = True
+
+    yield response  # yield the first response
+
+    for response in stream_response:
+      if response.status.code != status_code_pb2.SUCCESS:
+        raise Exception(f"Model Generate failed with response {response.status!r}")
+      yield response
+
+  def _stream(
+      self,
+      inputs,
+      method_name: str = 'stream',
+  ) -> Any:
+    input_signature = self._method_signatures[method_name].inputs
+    output_signature = self._method_signatures[method_name].outputs
+
+    if isinstance(inputs, list):
+      assert len(inputs) == 1, 'streaming methods do not support batched calls'
+      inputs = inputs[0]
+    assert isinstance(inputs, dict)
+    kwargs = inputs
+
+    # find the streaming vars in the input signature, and the streaming input python param
+    streaming_var_signatures = [var for var in input_signature if var.streaming]
+    stream_argname = set([var.name.split('.', 1)[0] for var in streaming_var_signatures])
+    assert len(
+        stream_argname) == 1, 'streaming methods must have exactly one streaming function arg'
+    stream_argname = stream_argname.pop()
+
+    # get the streaming input generator from the user-provided function arg values
+    user_inputs_generator = kwargs.pop(stream_argname)
+
+    def _input_proto_stream():
+      # first item contains all the inputs and the first stream item
+      proto = resources_pb2.Input()
+      try:
+        item = next(user_inputs_generator)
+      except StopIteration:
+        return  # no items to stream
+      kwargs[stream_argname] = item
+      serialize(kwargs, input_signature, proto.data)
+
+      yield proto
+
+      # subsequent items are just the stream items
+      for item in user_inputs_generator:
+        proto = resources_pb2.Input()
+        serialize({stream_argname: item}, streaming_var_signatures, proto.data)
+        yield proto
+
+    response_stream = self._stream_by_proto(_input_proto_stream(), method_name)
+    #print(response)
+
+    for response in response_stream:
+      assert len(response.outputs) == 1, 'streaming methods must have exactly one output'
+      yield deserialize(response.outputs[0].data, output_signature, is_output=True)
+
+  def _req_iterator(self,
+                    input_iterator: Iterator[List[resources_pb2.Input]],
+                    method_name: str = None,
+                    inference_params: Dict = {},
+                    output_config: Dict = {}):
+    request = service_pb2.PostModelOutputsRequest()
+    request.CopyFrom(self.request_template)
+    request.model.model_version.output_info.params['_method_name'] = method_name
+    if inference_params:
+      request.model.model_version.output_info.params.update(inference_params)
+    if output_config:
+      request.model.model_version.output_info.output_config.MergeFromDict(output_config)
+    for inputs in input_iterator:
+      req = service_pb2.PostModelOutputsRequest()
+      req.CopyFrom(request)
+      if isinstance(inputs, list):
+        req.inputs.extend(inputs)
+      else:
+        req.inputs.append(inputs)
+      yield req
+
+  def _stream_by_proto(self,
+                       inputs: Iterator[List[resources_pb2.Input]],
+                       method_name: str = None,
+                       inference_params: Dict = {},
+                       output_config: Dict = {}):
+    """Generate the stream output on model based on the given stream of inputs.
+    """
+    # if not isinstance(inputs, Iterator[List[Input]]):
+    #   raise UserError('Invalid inputs, inputs must be a iterator of list of Input objects.')
+
+    request = self._req_iterator(inputs, method_name, inference_params, output_config)
+
+    start_time = time.time()
+    backoff_iterator = BackoffIterator(10)
+    generation_started = False
+    while True:
+      if generation_started:
+        break
+      stream_response = self.STUB.StreamModelOutputs(request)
+      for response in stream_response:
+        if status_is_retryable(response.status.code) and \
+                time.time() - start_time < 60 * 10:
+          self.logger.info("Model is still deploying, please wait...")
+          time.sleep(next(backoff_iterator))
+          break
+        if response.status.code != status_code_pb2.SUCCESS:
+          raise Exception(f"Model Predict failed with response {response.status!r}")
+        else:
+          if not generation_started:
+            generation_started = True
+          yield response
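Editor's note: for orientation, here is a minimal usage sketch of the new ModelClient (not part of the diff; the model id and method name are hypothetical, and auth setup is omitted). The callable methods such as predict are attached at runtime by _define_functions from the fetched signatures, so they are not statically defined on the class, and every RPC starts by copying request_template so the model, version, and routing fields are set once.

# Hypothetical usage sketch of ModelClient; the id, method name, and auth are assumptions.
from clarifai_grpc.channel.clarifai_channel import ClarifaiChannel
from clarifai_grpc.grpc.api import service_pb2, service_pb2_grpc

from clarifai.client.model_client import ModelClient

# A bare stub; real callers would pass an authenticated stub.
stub = service_pb2_grpc.V2Stub(ClarifaiChannel.get_grpc_channel())

# Template carrying the shared request fields (model id, version, user_app_id, ...).
template = service_pb2.PostModelOutputsRequest(model_id='my-model-id')  # hypothetical id

client = ModelClient(stub, request_template=template)  # fetches signatures on init

# Methods are bound dynamically from the signatures, e.g. for a 'predict' signature:
#   client.predict(prompt='Hello')           # kwargs call -> single output
#   client.predict([{'prompt': 'Hello'}])    # list of dicts -> batched outputs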
clarifai/client/workflow.py
CHANGED
@@ -15,7 +15,7 @@ from clarifai.constants.workflow import MAX_WORKFLOW_PREDICT_INPUTS
 from clarifai.errors import UserError
 from clarifai.urls.helper import ClarifaiUrlHelper
 from clarifai.utils.logging import logger
-from clarifai.utils.misc import BackoffIterator
+from clarifai.utils.misc import BackoffIterator, status_is_retryable
 from clarifai.workflows.export import Exporter
 
 
@@ -99,7 +99,7 @@ class Workflow(Lister, BaseClient):
     while True:
       response = self._grpc_request(self.STUB.PostWorkflowResults, request)
 
-      if response.status.code
+      if status_is_retryable(response.status.code) and \
          time.time() - start_time < 60*10:  # 10 minutes
         self.logger.info(f"{self.id} Workflow is still deploying, please wait...")
         time.sleep(next(backoff_iterator))
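Editor's note: the retry condition above now delegates to the shared status_is_retryable helper, which this release adds to clarifai/utils/misc.py (the +12 lines in the file list). A hedged sketch of what such a helper could look like; the exact set of status codes is an assumption, the real list lives in clarifai/utils/misc.py:

# Sketch only: the actual retryable-code list is defined in clarifai/utils/misc.py.
from clarifai_grpc.grpc.api.status import status_code_pb2


def status_is_retryable(status_code: int) -> bool:
  """Return True for transient statuses that merit a backoff-and-retry."""
  return status_code in (
      status_code_pb2.MODEL_DEPLOYING,
      status_code_pb2.MODEL_LOADING,
      status_code_pb2.MODEL_BUSY_PLEASE_RETRY,
  )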
Binary file
Binary file
clarifai/runners/__init__.py
CHANGED
@@ -1,14 +1,9 @@
-from .models.base_typed_model import AnyAnyModel, TextInputModel, VisualInputModel
 from .models.model_builder import ModelBuilder
+from .models.model_class import ModelClass
 from .models.model_runner import ModelRunner
-from .utils.data_handler import InputDataHandler, OutputDataHandler
 
 __all__ = [
     "ModelRunner",
     "ModelBuilder",
-    "InputDataHandler",
-    "OutputDataHandler",
-    "AnyAnyModel",
-    "TextInputModel",
-    "VisualInputModel",
+    "ModelClass",
 ]
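Editor's note: downstream code that imported the removed typed-model or data-handler classes will break on upgrade; only the three names in the new __all__ remain importable (migration sketch, assuming callers move to the generic ModelClass):

# Before (works in 11.1.4rc2, removed in 11.1.5rc1):
#   from clarifai.runners import AnyAnyModel, TextInputModel, VisualInputModel
#   from clarifai.runners import InputDataHandler, OutputDataHandler

# After: only these exports remain; model code subclasses ModelClass instead.
from clarifai.runners import ModelBuilder, ModelClass, ModelRunner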
Binary file
Binary file
clarifai/runners/dockerfile_template/Dockerfile.template
CHANGED
@@ -1,30 +1,11 @@
 # syntax=docker/dockerfile:1.13-labs
-
-# User specific requirements installed in the pip_packages
-#############################
-FROM --platform=$TARGETPLATFORM ${FINAL_IMAGE} as pip_packages
+FROM --platform=$TARGETPLATFORM ${FINAL_IMAGE} as final
 
 COPY --link requirements.txt /home/nonroot/requirements.txt
 
 # Update clarifai package so we always have latest protocol to the API. Everything should land in /venv
 RUN ["pip", "install", "--no-cache-dir", "-r", "/home/nonroot/requirements.txt"]
 RUN ["pip", "show", "clarifai"]
-#############################
-
-#############################
-# Downloader dependencies image
-#############################
-FROM --platform=$TARGETPLATFORM ${DOWNLOADER_IMAGE} as downloader
-
-# make sure we have the latest clarifai package. This version is filled in by SDK.
-RUN ["pip", "install", "clarifai==${CLARIFAI_VERSION}"]
-#####
-
-
-#############################
-# Final runtime image
-#############################
-FROM --platform=$TARGETPLATFORM ${FINAL_IMAGE} as final
 
 # Set the NUMBA cache dir to /tmp
 # Set the TORCHINDUCTOR cache dir to /tmp
@@ -34,12 +15,6 @@ ENV NUMBA_CACHE_DIR=/tmp/numba_cache \
     HOME=/tmp \
     DEBIAN_FRONTEND=noninteractive
 
-#####
-# Copy the python requirements needed to download checkpoints
-#####
-COPY --link=true --from=downloader /venv /venv
-#####
-
 #####
 # Copy the files needed to download
 #####
@@ -52,12 +27,6 @@ COPY --link=true config.yaml /home/nonroot/main/
 RUN ["python", "-m", "clarifai.cli", "model", "download-checkpoints", "--model_path", "/home/nonroot/main", "--out_path", "/home/nonroot/main/1/checkpoints", "--stage", "build"]
 #####
 
-
-#####
-# Copy the python packages from the builder stage.
-COPY --link=true --from=pip_packages /venv /venv
-#####
-
 # Copy in the actual files like config.yaml, requirements.txt, and most importantly 1/model.py
 # for the actual model.
 # If checkpoints aren't downloaded since a checkpoints: block is not provided, then they will
@@ -75,6 +44,9 @@ ENV PYTHONPATH=${PYTHONPATH}:/home/nonroot/main \
     CLARIFAI_COMPUTE_CLUSTER_ID=${CLARIFAI_COMPUTE_CLUSTER_ID} \
     CLARIFAI_API_BASE=${CLARIFAI_API_BASE:-https://api.clarifai.com}
 
+# Write out the model function signatures
+RUN ["python", "-m", "clarifai.cli", "model", "signatures", "--model_path", "/home/nonroot/main", "--out_path", "/home/nonroot/main/signatures.yaml"]
+
 # Finally run the clarifai entrypoint to start the runner loop and local dev server.
 # Note(zeiler): we may want to make this a clarifai CLI call.
 ENTRYPOINT ["python", "-m", "clarifai.runners.server"]
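Editor's note: the Dockerfile collapses the three build stages (pip_packages, downloader, final) into a single stage, and the new build step bakes the model's method signatures into the image at /home/nonroot/main/signatures.yaml, pairing with the client-side _fetch_signatures above. A hedged sketch of inspecting that file (the YAML schema is an assumption; the real serialization lives in clarifai/runners/utils/method_signatures.py):

# Hypothetical: inspect the signatures file the new Dockerfile step writes out.
import yaml

with open('/home/nonroot/main/signatures.yaml') as f:
  signatures = yaml.safe_load(f)

# Assumed shape: method names mapped to their input/output variable specs.
print(signatures)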
Binary file
Binary file
Binary file
Binary file
Binary file
Binary file