clarifai 10.2.0__py3-none-any.whl → 10.3.0__py3-none-any.whl
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- clarifai/client/app.py +21 -10
- clarifai/client/auth/helper.py +12 -2
- clarifai/client/base.py +14 -4
- clarifai/client/dataset.py +59 -8
- clarifai/client/input.py +15 -2
- clarifai/client/model.py +201 -21
- clarifai/client/module.py +9 -1
- clarifai/client/search.py +10 -2
- clarifai/client/user.py +22 -14
- clarifai/client/workflow.py +10 -2
- clarifai/constants/input.py +1 -0
- clarifai/datasets/export/inputs_annotations.py +18 -12
- clarifai/utils/evaluation/__init__.py +2 -426
- clarifai/utils/evaluation/main.py +426 -0
- clarifai/utils/evaluation/testset_annotation_parser.py +150 -0
- clarifai/utils/misc.py +4 -10
- clarifai/utils/model_train.py +6 -7
- clarifai/versions.py +1 -1
- {clarifai-10.2.0.dist-info → clarifai-10.3.0.dist-info}/METADATA +23 -15
- {clarifai-10.2.0.dist-info → clarifai-10.3.0.dist-info}/RECORD +24 -25
- {clarifai-10.2.0.dist-info → clarifai-10.3.0.dist-info}/WHEEL +1 -1
- clarifai/client/runner.py +0 -234
- clarifai/runners/__init__.py +0 -0
- clarifai/runners/example.py +0 -40
- clarifai/runners/example_llama2.py +0 -81
- {clarifai-10.2.0.dist-info → clarifai-10.3.0.dist-info}/LICENSE +0 -0
- {clarifai-10.2.0.dist-info → clarifai-10.3.0.dist-info}/entry_points.txt +0 -0
- {clarifai-10.2.0.dist-info → clarifai-10.3.0.dist-info}/top_level.txt +0 -0
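The headline user-facing changes in this release are visible in the README (METADATA) diff below: `Dataset.export()` no longer takes a locally downloaded `clarifai-data-protobuf.zip` archive, and `Model` gains an `export()` method that packages a trained model into a `model.tar` for Triton Inference Server deployment. A minimal sketch of both calls follows; the user/app/dataset IDs and the model URL are placeholders for illustration and do not come from this diff.

```python
from clarifai.client.app import App
from clarifai.client.model import Model

# Placeholder IDs for illustration only; CLARIFAI_PAT must be set in the environment.
app = App(user_id="user_id", app_id="app_id")
dataset = app.dataset(dataset_id="dataset_id")

# 10.3.0: only a local save path is needed; the platform archive is no longer passed in explicitly.
dataset.export(save_path="output.zip")

# New in 10.3.0: package a trained model as model.tar for Triton serving.
model = Model("https://clarifai.com/user_id/app_id/models/model_id")
model.export("output/folder/")
```

As with the other SDK snippets in the README, these calls assume valid credentials and existing resources; they are a sketch of the changed API surface, not a verbatim excerpt from the package.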
{clarifai-10.2.0.dist-info → clarifai-10.3.0.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: clarifai
-Version: 10.2.0
+Version: 10.3.0
 Summary: Clarifai Python SDK
 Home-page: https://github.com/Clarifai/clarifai-python
 Author: Clarifai
@@ -20,18 +20,18 @@ Classifier: Operating System :: OS Independent
 Requires-Python: >=3.8
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: clarifai-grpc
-Requires-Dist: numpy
-Requires-Dist: tqdm
-Requires-Dist: tritonclient
-Requires-Dist: rich
-Requires-Dist: PyYAML
-Requires-Dist: schema
-Requires-Dist: Pillow
-Requires-Dist: inquirerpy
-Requires-Dist: tabulate
+Requires-Dist: clarifai-grpc ~=10.2.3
+Requires-Dist: numpy >=1.22.0
+Requires-Dist: tqdm >=4.65.0
+Requires-Dist: tritonclient >=2.34.0
+Requires-Dist: rich >=13.4.2
+Requires-Dist: PyYAML >=6.0.1
+Requires-Dist: schema >=0.7.5
+Requires-Dist: Pillow >=9.5.0
+Requires-Dist: inquirerpy ==0.3.4
+Requires-Dist: tabulate >=0.9.0
 Provides-Extra: all
-Requires-Dist: pycocotools
+Requires-Dist: pycocotools ==2.0.6 ; extra == 'all'

 <h1 align="center">
 <a href="https://www.clarifai.com/"><img alt="Clarifai" title="Clarifai" src="https://upload.wikimedia.org/wikipedia/commons/b/bc/Clarifai_Logo_FC_Web.png"></a>
@@ -183,9 +183,7 @@ dataset.upload_from_csv(csv_path='csv_path', input_type='text', csv_type='raw',
 dataset.upload_from_folder(folder_path='folder_path', input_type='text', labels=True)

 # Export Dataset
-
-# Note: clarifai-data-protobuf.zip is acquired through exporting datasets within the Clarifai Platform.
-Dataset().export(save_path='output.zip', local_archive_path='clarifai-data-protobuf.zip')
+dataset.export(save_path='output.zip')
 ```


@@ -307,6 +305,16 @@ status = model.training_status(version_id=model_version_id,training_logs=True)
 print(status)
 ```

+#### Export your trained model
+Model Export feature enables you to package your trained model into a `model.tar` file. This file enables deploying your model within a Triton Inference Server deployment.
+
+```python
+from clarifai.client.model import Model
+
+model = Model('url')
+model.export('output/folder/')
+```
+
 #### Evaluate your trained model

 When your model is trained and ready, you can evaluate by the following code
{clarifai-10.2.0.dist-info → clarifai-10.3.0.dist-info}/RECORD

@@ -1,31 +1,31 @@
 clarifai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/cli.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/errors.py,sha256=RwzTajwds51wLD0MVlMC5kcpBnzRpreDLlazPSBZxrg,2605
-clarifai/versions.py,sha256=
+clarifai/versions.py,sha256=mAiLKe7P3acWV9a8JoMJZNZ_fCVMfA3wKDrVlelETJY,186
 clarifai/client/__init__.py,sha256=xI1U0l5AZdRThvQAXCLsd9axxyFzXXJ22m8LHqVjQRU,662
-clarifai/client/app.py,sha256=
-clarifai/client/base.py,sha256=
-clarifai/client/dataset.py,sha256=
-clarifai/client/input.py,sha256=
+clarifai/client/app.py,sha256=LC3rnuqr97f-S7LsJ9Q7KZnYMBHGDAq4mcFjjcPjpqo,27240
+clarifai/client/base.py,sha256=FrnSY9tSxjTxhABfBzQz5-PEppWMPbIyvSNnx8mVz8s,6919
+clarifai/client/dataset.py,sha256=wKDpON8kbOXpbfOkre5EYKXT4zEX-5xu9LgtORNuhog,29903
+clarifai/client/input.py,sha256=hPCMj4f2YQhXKz4ziDDoWqMVhN88JprVm0sMhKkStEI,40574
 clarifai/client/lister.py,sha256=03KGMvs5RVyYqxLsSrWhNc34I8kiF1Ph0NeyEwu7nMU,2082
-clarifai/client/model.py,sha256=
-clarifai/client/module.py,sha256=
-clarifai/client/
-clarifai/client/
-clarifai/client/
-clarifai/client/workflow.py,sha256=oALMJfdgTqiilfpDT3H_nepqX9mexLu-uWV0NvtxUs0,10291
+clarifai/client/model.py,sha256=CYWoIsg3i0HbPrdzfto8HPklkGO12u8YnVImDExYwNY,44093
+clarifai/client/module.py,sha256=360JaOasX0DZCNE_Trj0LNTr-T_tUDZLfGpz0CdIi78,4248
+clarifai/client/search.py,sha256=naH8bBpl5UTlJT3WdyNd9TARGlaUtDPRvFf8IIUTDQA,11062
+clarifai/client/user.py,sha256=EQTeotfYTNedGcbTICYOUJqKgWhfVHvaMRTJ1hdoIdQ,10372
+clarifai/client/workflow.py,sha256=e3axkhU6c6WcxK9P5tgmnV464k-afslSzsSXx6nSMgA,10560
 clarifai/client/auth/__init__.py,sha256=7EwR0NrozkAUwpUnCsqXvE_p0wqx_SelXlSpKShKJK0,136
-clarifai/client/auth/helper.py,sha256=
+clarifai/client/auth/helper.py,sha256=Wa5gkX0dl0xBwrT0E10lv9_Eoeh-WrMZR0Qc0YcV7es,14225
 clarifai/client/auth/register.py,sha256=2CMdBsoVLoTfjyksE6j7BM2tiEc73WKYvxnwDDgNn1k,536
 clarifai/client/auth/stub.py,sha256=KIzJZ8aRB1RzXJeWHDAx19HNdBsblPPHwYLfAkgI3rY,3779
 clarifai/constants/dataset.py,sha256=OXYirr0iaoN_47V6wxO0H6ptV81y8zNGapPBz9qqD8o,516
+clarifai/constants/input.py,sha256=WcHwToUVIK9ItAhDefaSohQHCLNeR55PSjZ0BFnoZ3U,28
 clarifai/constants/model.py,sha256=LsMkLVkuBpfS4j4yDW9M4O7HxzRpIuSo9qU5T8Wg2Co,217
 clarifai/constants/rag.py,sha256=WcHwToUVIK9ItAhDefaSohQHCLNeR55PSjZ0BFnoZ3U,28
 clarifai/constants/search.py,sha256=_g3S-JEvuygiFfMVK3cl4Ry9erZpt8Zo4ilXL2i3DAE,52
 clarifai/constants/workflow.py,sha256=cECq1xdvf44MCdtK2AbkiuuwhyL-6OWZdQfYbsLKy_o,33
 clarifai/datasets/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/datasets/export/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-clarifai/datasets/export/inputs_annotations.py,sha256=
+clarifai/datasets/export/inputs_annotations.py,sha256=3Bv6JsPzSeGEEJlkF1KR8qDHc_QyHF0ddvHfSiB5Pjc,9479
 clarifai/datasets/upload/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/datasets/upload/base.py,sha256=IP4sdBRfThk2l0W1rDWciFrAJnKwVsM-gu4zEslJ2_E,2198
 clarifai/datasets/upload/features.py,sha256=KeVxO36WrL3uqWCN_-aex1k28C5ZRTm6G8SmTtus6KA,1571
@@ -92,24 +92,23 @@ clarifai/modules/style.css,sha256=j7FNPZVhLPj35vvBksAJ90RuX5sLuqzDR5iM2WIEhiA,60
 clarifai/rag/__init__.py,sha256=wu3PzAzo7uqgrEzuaC9lY_3gj1HFiR3GU3elZIKTT5g,40
 clarifai/rag/rag.py,sha256=fYCIs9WJKugRFZ6Xt468_7PE6ipE3x4DfaQzvw4EkuY,12392
 clarifai/rag/utils.py,sha256=aqAM120xC8DcpqWMrsKsmT9QwrKKJZYBLyDgYb8_L-8,4061
-clarifai/runners/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-clarifai/runners/example.py,sha256=V0Nc52JkhCm97oaWzKVg71g50M1ltxI9jyPMo6tKU6E,1302
-clarifai/runners/example_llama2.py,sha256=WMGTqv3v9t3ID1rjW9BTLMkIuvyTESL6xHcOO6A220Y,2712
 clarifai/schema/search.py,sha256=JjTi8ammJgZZ2OGl4K6tIA4zEJ1Fr2ASZARXavI1j5c,2448
 clarifai/urls/helper.py,sha256=tjoMGGHuWX68DUB0pk4MEjrmFsClUAQj2jmVEM_Sy78,4751
 clarifai/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/utils/logging.py,sha256=xJTteoUodQ7RfsbO676QgidKa5EVPbdUu89Xlwwso2s,4533
-clarifai/utils/misc.py,sha256=
-clarifai/utils/model_train.py,sha256=
-clarifai/utils/evaluation/__init__.py,sha256=
+clarifai/utils/misc.py,sha256=GznzquXXFt8J9qzMWtTJPFWCSc5QTs_ZBldW1mXCZzE,1285
+clarifai/utils/model_train.py,sha256=Mndqy5GNu7kjQHjDyNVyamL0hQFLGSHcWhOuPyOvr1w,8005
+clarifai/utils/evaluation/__init__.py,sha256=PYkurUrXrGevByj7RFb6CoU1iC7fllyQSfnnlo9WnY8,69
 clarifai/utils/evaluation/helpers.py,sha256=d_dcASRI_lhsHIRukAF1S-w7XazLpK9y6E_ug3l50t4,18440
+clarifai/utils/evaluation/main.py,sha256=0SbTN8cUDjfd4vHUSIG0qVVmDDlJE_5il4vcZGi6a5g,15781
+clarifai/utils/evaluation/testset_annotation_parser.py,sha256=iZfLw6oR1qgJ3MHMbOZXcGBLu7btSDn0VqdiAzpIm4g,5002
 clarifai/workflows/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/workflows/export.py,sha256=vICRhIreqDSShxLKjHNM2JwzKsf1B4fdXB0ciMcA70k,1945
 clarifai/workflows/utils.py,sha256=nGeB_yjVgUO9kOeKTg4OBBaBz-AwXI3m-huSVj-9W18,1924
 clarifai/workflows/validate.py,sha256=yJq03MaJqi5AK3alKGJJBR89xmmjAQ31sVufJUiOqY8,2556
-clarifai-10.
-clarifai-10.
-clarifai-10.
-clarifai-10.
-clarifai-10.
-clarifai-10.
+clarifai-10.3.0.dist-info/LICENSE,sha256=mUqF_d12-qE2n41g7C5_sq-BMLOcj6CNN-jevr15YHU,555
+clarifai-10.3.0.dist-info/METADATA,sha256=O4qvini2SsJNHhe2zizbmaNrl52Ch7h_3AV1IkDMjEA,18723
+clarifai-10.3.0.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+clarifai-10.3.0.dist-info/entry_points.txt,sha256=qZOr_MIPG0dBBE1zringDJS_wXNGTAA_SQ-zcbmDHOw,82
+clarifai-10.3.0.dist-info/top_level.txt,sha256=wUMdCQGjkxaynZ6nZ9FAnvBUCgp5RJUVFSy2j-KYo0s,9
+clarifai-10.3.0.dist-info/RECORD,,
clarifai/client/runner.py DELETED

@@ -1,234 +0,0 @@
-# Copyright 2023 Clarifai, Inc.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Interface to Clarifai Runners API."""
-
-import os
-import threading
-import traceback
-from concurrent.futures import ThreadPoolExecutor
-
-from clarifai_grpc.grpc.api import resources_pb2, service_pb2
-from clarifai_grpc.grpc.api.status import status_code_pb2, status_pb2
-from google.protobuf import json_format
-
-from clarifai.client.base import BaseClient
-from clarifai.errors import UserError
-from clarifai.utils.logging import get_logger
-
-
-class Runner(BaseClient):
-  """Base class for remote inference runners. This should be subclassed with the run_input method
-  implemented to process each input in the request.
-
-  Then on the subclass call start() to start the run loop.
-  """
-
-  def __init__(self,
-               runner_id: str,
-               user_id: str = None,
-               check_runner_exists: bool = True,
-               base_url: str = "https://api.clarifai.com",
-               pat: str = None,
-               token: str = None,
-               num_parallel_polls: int = 4,
-               **kwargs) -> None:
-    """
-    Args:
-      runner_id (str): the id of the runner to use. Create the runner in the Clarifai API first
-      user_id (str): Clarifai User ID
-      base_url (str): Base API url. Default "https://api.clarifai.com"
-      pat (str): A personal access token for authentication. Can be set as env var CLARIFAI_PAT
-      token (str): A session token for authentication. Accepts either a session token or a pat. Can be set as env var CLARIFAI_SESSION_TOKEN
-      num_parallel_polls (int): the max number of threads for parallel run loops to be fetching work from
-    """
-    user_id = user_id or os.environ.get("CLARIFAI_USER_ID", "")
-
-    if not user_id:
-      raise UserError(
-          "Set CLARIFAI_USER_ID as environment variables or pass user_id as input arguments")
-
-    self.runner_id = runner_id
-    self.logger = get_logger("INFO", __name__)
-    self.kwargs = {**kwargs, 'id': runner_id, 'user_id': user_id}
-    self.runner_info = resources_pb2.Runner(**self.kwargs)
-    self.num_parallel_polls = min(10, num_parallel_polls)
-    BaseClient.__init__(self, user_id=self.user_id, app_id="", base=base_url, pat=pat, token=token)
-
-    # Check that the runner exists.
-    if check_runner_exists:
-      request = service_pb2.GetRunnerRequest(user_app_id=self.user_app_id, runner_id=runner_id)
-      response = self._grpc_request(self.STUB.GetRunner, request)
-      if response.status.code != status_code_pb2.SUCCESS:
-        raise Exception(
-            f"Error getting runner, are you use this is a valid runner id {runner_id} for user_id {user_id}. Error: {response.status.description}, Details: {response.status.details}"
-        )
-
-  def start(self):
-    """
-    Start the run loop. This will ask the Clarifai API for work, and when it gets work, it will run
-    the model on the inputs and post the results back to the Clarifai API. It will then ask for more
-    work again.
-    """
-    self._long_poll_loop()
-
-  def _run(self, request: service_pb2.PostModelOutputsRequest) -> service_pb2.MultiOutputResponse:
-    """
-    Run the model on the given request. You shouldn't need to override this method, see run_input
-    for the implementation to process each input in the request.
-
-    Args:
-      request: service_pb2.PostModelOutputsRequest - the request to run the model on
-
-    Returns:
-      service_pb2.MultiOutputResponse - the response from the model's run_input implementation.
-    """
-    outputs = []
-
-    # TODO: Could cache the model and this conversion if the hash is the same.
-    model = request.model
-    output_info = None
-    if request.model.model_version.id != "":
-      output_info = json_format.MessageToDict(
-          model.model_version.output_info, preserving_proto_field_name=True)
-
-    # TODO: parallelize this over inputs in a single request.
-    for inp in request.inputs:
-      try:
-        output = self.run_input(inp, output_info)
-      except Exception as e:
-        self.logger.exception("run_input failed:")
-        traceback.print_exc()
-        output = resources_pb2.Output(status=status_pb2.Status(
-            code=status_code_pb2.MODEL_PREDICTION_FAILED,
-            description="Failed",
-            details="",
-            stack_trace=traceback.format_exc().split('\n'),
-            internal_details=str(e),
-        ))
-        self.logger.exception(output)
-      outputs.append(output)
-
-    successes = [o.status.code == status_code_pb2.SUCCESS for o in outputs]
-    if all(successes):
-      status = status_pb2.Status(
-          code=status_code_pb2.SUCCESS,
-          description="Success",
-      )
-    elif any(successes):
-      status = status_pb2.Status(
-          code=status_code_pb2.MIXED_STATUS,
-          description="Mixed Status",
-      )
-    else:
-      status = status_pb2.Status(
-          code=status_code_pb2.FAILURE,
-          description="Failed",
-      )
-
-    return service_pb2.MultiOutputResponse(
-        status=status,
-        outputs=outputs,
-    )
-
-  def run_input(self, input: resources_pb2.Input,
-                output_info: resources_pb2.OutputInfo) -> resources_pb2.Output:
-    """
-    Run the model on the given input in the request. This is the method you should override to
-    process each input in the request.
-
-    Args:
-      input: resources_pb2.Input - the input to run the model on
-      output_info: resources_pb2.OutputInfo - the output info for the model which includes
-    output_info.params that the model can pass in on very prediction request. These can be provided
-    during PostModelVersions as default for every request or can be overridden on a per request by
-    passing in output_info in the PostModelOutputs request as the model.model_version.output_info.params
-    field.
-
-    Returns:
-      resources_pb2.Output - the response from the model's run_input implementation.
-    """
-    raise NotImplementedError("run_input() not implemented")
-
-  def _long_poll_loop(self):
-    """
-    This method will long poll for work, and when it gets work, it will run the model on the inputs
-    and post the results back to the Clarifai API. It will then long poll again for more work.
-    """
-
-    def _single_thread_loop():
-      c = 0
-      while True:
-        # Long poll waiting for work.
-        try:
-          self.logger.info(f"Loop iteration: {c} for thread {threading.get_ident()}")
-          work_response = self._grpc_request(self.STUB.ListRunnerItems,
-                                             service_pb2.ListRunnerItemsRequest(
-                                                 user_app_id=self.user_app_id,
-                                                 runner_id=self.runner_id))
-          if work_response.status.code == status_code_pb2.RUNNER_NEEDS_RETRY:
-            c += 1
-            continue  # immediate restart the long poll
-          if work_response.status.code != status_code_pb2.SUCCESS:
-            raise Exception("Error getting work: {}".format(work_response.status.description))
-          if len(work_response.items) == 0:
-            self.logger.info("No work to do. Waiting...")
-            continue
-
-          # We have work to do. Run the model on each item fetched (typically 1 item at a time right
-          # now is returned by ListRunnerItems).
-          for item in work_response.items:
-            if not item.HasField('post_model_outputs_request'):
-              raise Exception("Unexpected work item type: {}".format(item))
-            self.logger.info(
-                f"Working on item: {item.id} with inputs {len(item.post_model_outputs_request.inputs)}"
-            )
-            # run this batch of data through the model.
-            result = self._run(item.post_model_outputs_request)
-
-            result_response = self._grpc_request(
-                self.STUB.PostRunnerItemOutputs,
-                service_pb2.PostRunnerItemOutputsRequest(
-                    user_app_id=self.user_app_id,
-                    item_id=item.id,
-                    runner_id=self.runner_id,
-                    runner_item_outputs=[
-                        service_pb2.RunnerItemOutput(multi_output_response=result)
-                    ]))
-            if result_response.status.code != status_code_pb2.SUCCESS:
-              raise Exception(
-                  json_format.MessageToJson(result_response, preserving_proto_field_name=True))
-              # raise Exception("Error posting result: {}".format(result_response.status.description))
-        except Exception as e:  # keep the loop running
-          self.logger.exception(e)
-          continue

-    self.logger.info(f"Starting run loop for {self.num_parallel_polls} threads...")
-    if self.num_parallel_polls == 1:
-      _single_thread_loop()
-    else:
-      pool = ThreadPoolExecutor(max_workers=self.num_parallel_polls)
-      futures = [pool.submit(_single_thread_loop) for _ in range(self.num_parallel_polls)]
-      for f in futures:
-        self.logger.info(f.result())
-      pool.shutdown()
-
-  def __getattr__(self, name):
-    return getattr(self.runner_info, name)
-
-  def __str__(self):
-    init_params = [param for param in self.kwargs.keys()]
-    attribute_strings = [
-        f"{param}={getattr(self.runner_info, param)}" for param in init_params
-        if hasattr(self.runner_info, param)
-    ]
-    return f"Runner Details: \n{', '.join(attribute_strings)}\n"
clarifai/runners/__init__.py DELETED
File without changes
clarifai/runners/example.py DELETED

@@ -1,40 +0,0 @@
-from clarifai_grpc.grpc.api import resources_pb2
-
-from clarifai.client.runner import Runner
-
-
-class MyRunner(Runner):
-  """A custom runner that adds "Hello World" to the end of the text and replaces the domain of the
-  image URL as an example.
-  """
-
-  def run_input(self, input: resources_pb2.Input,
-                output_info: resources_pb2.OutputInfo) -> resources_pb2.Output:
-    """This is the method that will be called when the runner is run. It takes in an input and
-    returns an output.
-    """
-
-    output = resources_pb2.Output()
-
-    data = input.data
-
-    # Optional use of output_info
-    params_dict = {}
-    if "params" in output_info:
-      params_dict = output_info["params"]
-
-    if data.text.raw != "":
-      output.data.text.raw = data.text.raw + "Hello World" + params_dict.get("hello", "")
-    if data.image.url != "":
-      output.data.text.raw = data.image.url.replace("samples.clarifai.com",
-                                                    "newdomain.com" + params_dict.get("domain",))
-    return output
-
-
-if __name__ == '__main__':
-  # Make sure you set these env vars before running the example.
-  # CLARIFAI_PAT
-  # CLARIFAI_USER_ID
-
-  # You need to first create a runner in the Clarifai API and then use the ID here.
-  MyRunner(runner_id="sdk-test-runner").start()
clarifai/runners/example_llama2.py DELETED

@@ -1,81 +0,0 @@
-import time
-
-import requests
-from clarifai_grpc.grpc.api import resources_pb2
-from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
-
-from clarifai.client.runner import Runner
-
-# This example requires to run the following before running this example:
-# pip install transformers
-
-# https://huggingface.co/TheBloke/Llama-2-70B-chat-GPTQ
-model_name_or_path = "TheBloke/Llama-2-7B-chat-GPTQ"
-model_basename = "model"
-
-use_triton = False
-
-
-class Llama2Runner(Runner):
-  """A custom runner that runs the LLama2 LLM.
-  """
-
-  def __init__(self, *args, **kwargs):
-    super(Llama2Runner, self).__init__(*args, **kwargs)
-    self.logger.info("Starting to load the model...")
-    st = time.time()
-    self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
-    self.model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map='auto')
-
-    self.logger.info("Loading model complete in (%f seconds), ready to loop for requests." %
-                     (time.time() - st))
-
-  def run_input(self, input: resources_pb2.Input,
-                output_info: resources_pb2.OutputInfo) -> resources_pb2.Output:
-    """This is the method that will be called when the runner is run. It takes in an input and
-    returns an output.
-    """
-
-    output = resources_pb2.Output()
-    data = input.data
-    if data.text.raw != "":
-      input_text = data.text.raw
-    elif data.text.url != "":
-      input_text = str(requests.get(data.text.url).text)
-    else:
-      raise Exception("Need to include data.text.raw or data.text.url in your inputs.")
-
-    if "params" in output_info:
-      params_dict = output_info["params"]
-      self.logger.info("params_dict: %s", params_dict)
-
-    time.time()
-    max_tokens = 1024
-    # # Method 1
-    # input_ids = self.tokenizer(input_text, return_tensors='pt').input_ids.cuda()
-    # out = self.model.generate(inputs=input_ids, temperature=0.7, max_new_tokens=max_tokens)
-    # out_text = self.tokenizer.decode(out[0], skip_special_tokens=True)
-    # output.data.text.raw = out_text.replace(input_text, '')
-
-    # # Method 2
-    pipe = pipeline(
-        "text-generation",
-        model=self.model,
-        tokenizer=self.tokenizer,
-        max_new_tokens=max_tokens,
-        temperature=0.7,
-        top_p=0.95,
-        repetition_penalty=1.15,
-        return_full_text=False)
-    a = pipe(input_text)
-    output.data.text.raw = a[0]['generated_text']
-    return output
-
-
-if __name__ == '__main__':
-  # Make sure you set these env vars before running the example.
-  # CLARIFAI_PAT
-  # CLARIFAI_USER_ID
-
-  # You need to first create a runner in the Clarifai API and then use the ID here.
-  Llama2Runner(runner_id="sdk-llama2-runner").start()
{clarifai-10.2.0.dist-info → clarifai-10.3.0.dist-info}/LICENSE
File without changes

{clarifai-10.2.0.dist-info → clarifai-10.3.0.dist-info}/entry_points.txt
File without changes

{clarifai-10.2.0.dist-info → clarifai-10.3.0.dist-info}/top_level.txt
File without changes