clarifai 9.6.1__py3-none-any.whl → 9.6.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clarifai/auth/helper.py +3 -3
- clarifai/models/model_serving/constants.py +2 -3
- clarifai/models/model_serving/examples/text_to_text/bart-summarize/1/__init__.py +0 -0
- clarifai/models/model_serving/examples/text_to_text/bart-summarize/1/inference.py +47 -0
- clarifai/models/model_serving/examples/text_to_text/bart-summarize/1/model.py +60 -0
- clarifai/models/model_serving/examples/visual_embedding/vit-base/1/__init__.py +0 -0
- clarifai/models/model_serving/examples/visual_embedding/vit-base/1/inference.py +51 -0
- clarifai/models/model_serving/examples/visual_embedding/vit-base/1/model.py +60 -0
- clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/1/__init__.py +0 -0
- clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/1/inference.py +55 -0
- clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/1/model.py +60 -0
- clarifai/models/model_serving/model_config/deploy.py +27 -3
- clarifai/models/model_serving/model_config/triton_config.py +30 -0
- clarifai/models/model_serving/models/model_types.py +122 -0
- clarifai/models/model_serving/models/output.py +62 -0
- clarifai/models/model_serving/models/pb_model.py +0 -1
- clarifai/modules/style.css +7 -0
- clarifai/runners/base.py +140 -0
- clarifai/runners/example.py +36 -0
- {clarifai-9.6.1.dist-info → clarifai-9.6.3.dist-info}/METADATA +1 -1
- {clarifai-9.6.1.dist-info → clarifai-9.6.3.dist-info}/RECORD +44 -22
- clarifai_utils/auth/helper.py +3 -3
- clarifai_utils/models/model_serving/constants.py +2 -3
- clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/inference.py +47 -0
- clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/model.py +60 -0
- clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/inference.py +51 -0
- clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/model.py +60 -0
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/__init__.py +0 -0
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/inference.py +55 -0
- clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/model.py +60 -0
- clarifai_utils/models/model_serving/model_config/deploy.py +27 -3
- clarifai_utils/models/model_serving/model_config/triton_config.py +30 -0
- clarifai_utils/models/model_serving/models/model_types.py +122 -0
- clarifai_utils/models/model_serving/models/output.py +62 -0
- clarifai_utils/models/model_serving/models/pb_model.py +0 -1
- clarifai_utils/modules/style.css +7 -0
- clarifai_utils/runners/base.py +140 -0
- clarifai_utils/runners/example.py +36 -0
- {clarifai-9.6.1.dist-info → clarifai-9.6.3.dist-info}/LICENSE +0 -0
- {clarifai-9.6.1.dist-info → clarifai-9.6.3.dist-info}/WHEEL +0 -0
- {clarifai-9.6.1.dist-info → clarifai-9.6.3.dist-info}/entry_points.txt +0 -0
- {clarifai-9.6.1.dist-info → clarifai-9.6.3.dist-info}/top_level.txt +0 -0
clarifai/runners/base.py
ADDED
@@ -0,0 +1,140 @@
+# Copyright 2023 Clarifai, Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Interface to Clarifai Runners API."""
+
+from typing import Type
+
+from clarifai_grpc.grpc.api import resources_pb2, service_pb2
+from clarifai_grpc.grpc.api.status import status_code_pb2, status_pb2
+from google.protobuf import json_format
+
+from clarifai.auth.helper import ClarifaiAuthHelper
+from clarifai.client import create_stub
+
+
+class BaseRunner:
+  """
+  Base class for remote inference runners. This should be subclassed with the run_input method
+  implemented to process each input in the request.
+
+  Then on the subclass call start() to start the run loop.
+  """
+
+  def __init__(self, auth: Type[ClarifaiAuthHelper], runner_id: str) -> None:
+    """
+    Args:
+      auth: ClarifaiAuthHelper - the auth object to use
+      runner_id: str - the id of the runner to use. Create the runner in the Clarifai API first
+    """
+    self.auth = auth
+    self.stub = create_stub(self.auth)
+    self.runner_id = runner_id
+
+    # Check that the runner exists.
+    response = self.stub.GetRunner(
+        service_pb2.GetRunnerRequest(
+            user_app_id=self.auth.get_user_app_id_proto(), runner_id=self.runner_id))
+    if response.status.code != status_code_pb2.SUCCESS:
+      raise Exception(
+          f"Error getting runner, are you sure this is a valid runner id {runner_id} at the user_id/app_id {self.auth.get_user_app_id_proto().user_id}/{self.auth.get_user_app_id_proto().app_id}? Error: {response.status.description}"
+      )
+
+  def start(self):
+    """
+    Start the run loop. This will ask the Clarifai API for work, and when it gets work, it will run
+    the model on the inputs and post the results back to the Clarifai API. It will then ask for more
+    work again.
+    """
+    self._long_poll_loop()
+
+  def _run(self, request: service_pb2.PostModelOutputsRequest) -> service_pb2.MultiOutputResponse:
+    """
+    Run the model on the given request. You shouldn't need to override this method, see run_input
+    for the implementation to process each input in the request.
+
+    Args:
+      request: service_pb2.PostModelOutputsRequest - the request to run the model on
+
+    Returns:
+      service_pb2.MultiOutputResponse - the response from the model's run_input implementation.
+    """
+    outputs = []
+    # TODO: parallelize this
+    for inp in request.inputs:
+      # TODO: handle errors
+      outputs.append(self.run_input(inp))
+
+    return service_pb2.MultiOutputResponse(
+        status=status_pb2.Status(
+            code=status_code_pb2.SUCCESS,
+            description="Success",
+        ),
+        outputs=outputs,
+    )
+
+  def run_input(self, input: resources_pb2.Input) -> resources_pb2.Output:
+    """
+    Run the model on the given input in the request. This is the method you should override to
+    process each input in the request.
+
+    Args:
+      input: resources_pb2.Input - the input to run the model on
+
+    Returns:
+      resources_pb2.Output - the response from the model's run_input implementation.
+    """
+    raise NotImplementedError("run_input() not implemented")
+
+  def _long_poll_loop(self):
+    """
+    This method will long poll for work, and when it gets work, it will run the model on the inputs
+    and post the results back to the Clarifai API. It will then long poll again for more work.
+    """
+    c = 0
+    # TODO: handle more errors within this loop so it never stops.
+    # TODO: perhaps have multiple processes running this loop to handle more work.
+    while True:
+      # Long poll waiting for work.
+      print("Loop iteration: {}".format(c))
+      work_response = self.stub.ListRunnerItems(
+          service_pb2.ListRunnerItemsRequest(
+              user_app_id=self.auth.get_user_app_id_proto(), runner_id=self.runner_id))
+      if work_response.status.code == status_code_pb2.RUNNER_NEEDS_RETRY:
+        c += 1
+        continue  # immediately restart the long poll
+      if work_response.status.code != status_code_pb2.SUCCESS:
+        raise Exception("Error getting work: {}".format(work_response.status.description))
+      if len(work_response.items) == 0:
+        print("No work to do. Waiting...")
+        continue
+
+      # We have work to do. Run the model on the inputs.
+      for item in work_response.items:
+        if not item.HasField('post_model_outputs_request'):
+          raise Exception("Unexpected work item type: {}".format(item))
+        print(
+            f"Working on item: {item.id} with inputs {len(item.post_model_outputs_request.inputs)}"
+        )
+        result = self._run(item.post_model_outputs_request)
+
+        result_response = self.stub.PostRunnerItemOutputs(
+            service_pb2.PostRunnerItemOutputsRequest(
+                user_app_id=self.auth.get_user_app_id_proto(),
+                item_id=item.id,
+                runner_id=self.runner_id,
+                runner_item_outputs=[service_pb2.RunnerItemOutput(multi_output_response=result)]))
+        if result_response.status.code != status_code_pb2.SUCCESS:
+          raise Exception(
+              json_format.MessageToJson(result_response, preserving_proto_field_name=True))
+          # raise Exception("Error posting result: {}".format(result_response.status.description))
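The long-poll loop above raises on any non-retryable error, as its own TODOs note, so a long-lived deployment needs something around start() that restarts it. A minimal sketch, not part of the wheel, assuming runner is an instance of some BaseRunner subclass:

import time

def run_forever(runner, backoff_seconds: float = 5.0) -> None:
  # BaseRunner.start() blocks inside _long_poll_loop() until an exception escapes,
  # so catch it, wait a little, and re-enter the loop.
  while True:
    try:
      runner.start()
    except Exception as e:  # deliberately broad for a supervisor loop
      print(f"Runner loop crashed, restarting in {backoff_seconds}s: {e}")
      time.sleep(backoff_seconds)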
clarifai/runners/example.py
ADDED
@@ -0,0 +1,36 @@
+from clarifai_grpc.grpc.api import resources_pb2
+
+from clarifai.auth.helper import ClarifaiAuthHelper
+from clarifai.runners.base import BaseRunner
+
+
+class MyRunner(BaseRunner):
+  """ A custom runner that adds "Hello World" to the end of the text and replaces the domain of the
+  image URL as an example.
+  """
+
+  def run_input(self, input: resources_pb2.Input) -> resources_pb2.Output:
+    """ This is the method that will be called when the runner is run. It takes in an input and
+    returns an output.
+    """
+
+    output = resources_pb2.Output()
+
+    data = input.data
+
+    if data.text.raw != "":
+      output.data.text.raw = data.text.raw + "Hello World"
+    if data.image.url != "":
+      output.data.text.raw = data.image.url.replace("samples.clarifai.com", "newdomain.com")
+    return output
+
+
+if __name__ == '__main__':
+  # Make sure you set these env vars before running the example.
+  # CLARIFAI_PAT
+  # CLARIFAI_USER_ID
+  # CLARIFAI_APP_ID
+  auth = ClarifaiAuthHelper.from_env()
+
+  # You need to first create a runner in the Clarifai API and then use the ID here.
+  MyRunner(auth, runner_id="laptop_runner").start()
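MyRunner above is written to be run as a script, but the same flow works from any Python process. A hedged sketch of wiring it up programmatically; the credential values are placeholders and the "laptop_runner" id must already exist in the app, as the example itself notes:

import os

from clarifai.auth.helper import ClarifaiAuthHelper
from clarifai.runners.example import MyRunner

# Placeholders - ClarifaiAuthHelper.from_env() reads these variables.
os.environ["CLARIFAI_USER_ID"] = "my-user-id"
os.environ["CLARIFAI_APP_ID"] = "my-app-id"
os.environ["CLARIFAI_PAT"] = "my-personal-access-token"

auth = ClarifaiAuthHelper.from_env()
MyRunner(auth, runner_id="laptop_runner").start()  # blocks in the long-poll loop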
{clarifai-9.6.1.dist-info → clarifai-9.6.3.dist-info}/RECORD
CHANGED
@@ -1,6 +1,6 @@
 clarifai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/auth/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-clarifai/auth/helper.py,sha256=
+clarifai/auth/helper.py,sha256=4ZxyH43qQOoHse_ypmcaUpbhV4BrZ3I61GhgbxF2_9Q,13921
 clarifai/client/__init__.py,sha256=DlA1iS8Szl3I7oGw5UBVra7-dZ1M_wKxcZXCoUcDBJ8,121
 clarifai/client/abc.py,sha256=lRAIhYSUwHh0Xp6yZWVWXqWF1mOy9A-8aKscmoPXEnM,536
 clarifai/client/stub.py,sha256=pa1Sh9HeUG7Ipi3f-OmTRFp0hvH31PF60NsC7IwFBNI,3606
@@ -46,7 +46,7 @@ clarifai/listing/modules.py,sha256=yM074edY2d19HHt2MpO6_oAzDxM8yuc4hMns4D8zQTs,1
 clarifai/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/models/api.py,sha256=fsE6eLZ0NgcvDpAQcRpT2DPataBTg0gIEOWxtoNeYXM,9555
 clarifai/models/model_serving/__init__.py,sha256=Nls28G-fedNw2oQZIkPQSN__TgjJXbG9RDzzuHIM0VI,575
-clarifai/models/model_serving/constants.py,sha256=
+clarifai/models/model_serving/constants.py,sha256=JUXeo3Nla9UMA9U9yG-nbOfcRdEAwl30lj13Rp11xcg,196
 clarifai/models/model_serving/pb_model_repository.py,sha256=jtODfmEa-GrMlyFDNo_1jAYLuJTSy_IBd9BSpK12O8w,3411
 clarifai/models/model_serving/cli/__init__.py,sha256=Nls28G-fedNw2oQZIkPQSN__TgjJXbG9RDzzuHIM0VI,575
 clarifai/models/model_serving/cli/deploy_cli.py,sha256=PVoMUbM-EQ0GGzws7XpPr59IwdBawIl71CM2jOoh-C4,3825
@@ -58,28 +58,39 @@ clarifai/models/model_serving/examples/image_classification/age_vit/1/model.py,s
 clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/inference.py,sha256=jzdWLJQ8K6h-CFrmZju-DTuQNPSRXhJTS3zXdNaiA7A,2173
 clarifai/models/model_serving/examples/text_classification/xlm-roberta/1/model.py,sha256=kC2gglV8QI8G4TNwjerDOO-Xm3UHtoMGeapLRpnKP8E,1941
+clarifai/models/model_serving/examples/text_to_text/bart-summarize/1/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+clarifai/models/model_serving/examples/text_to_text/bart-summarize/1/inference.py,sha256=ekHKG926K3JOKUvFPdL0wuhPyTCpkCYsiY39YhchKaA,1847
+clarifai/models/model_serving/examples/text_to_text/bart-summarize/1/model.py,sha256=gLIP0dNXLD9lfBQmXEnGgjDo39QnCcu0PiFB37Wqxgg,1863
 clarifai/models/model_serving/examples/visual_detection/yolov5x/1/inference.py,sha256=bF-hhNBIZTq5Fci182OkWnToAQMJz4W2tzRf9D4oIsQ,2906
 clarifai/models/model_serving/examples/visual_detection/yolov5x/1/model.py,sha256=kC2gglV8QI8G4TNwjerDOO-Xm3UHtoMGeapLRpnKP8E,1941
+clarifai/models/model_serving/examples/visual_embedding/vit-base/1/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+clarifai/models/model_serving/examples/visual_embedding/vit-base/1/inference.py,sha256=P1JLU-N7IzYRzAjRlJjiqat0CF8M7Fzl_U6sfOBa2jM,1971
+clarifai/models/model_serving/examples/visual_embedding/vit-base/1/model.py,sha256=gLIP0dNXLD9lfBQmXEnGgjDo39QnCcu0PiFB37Wqxgg,1863
+clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/1/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/1/inference.py,sha256=Det16IapShTKwD6eEOFm7zczNF73ctoJ3sEb9EIY9nU,2119
+clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/1/model.py,sha256=gLIP0dNXLD9lfBQmXEnGgjDo39QnCcu0PiFB37Wqxgg,1863
 clarifai/models/model_serving/model_config/__init__.py,sha256=Nls28G-fedNw2oQZIkPQSN__TgjJXbG9RDzzuHIM0VI,575
-clarifai/models/model_serving/model_config/deploy.py,sha256=
+clarifai/models/model_serving/model_config/deploy.py,sha256=4JJlor543ePredE88kR8vwvw25xpYb8ESctp7Jr7A_c,2801
 clarifai/models/model_serving/model_config/serializer.py,sha256=rr5Bc7vuExT6L1hoBOaMNKeESQ-qH4TzPleopvdthR4,4231
-clarifai/models/model_serving/model_config/triton_config.py,sha256=
+clarifai/models/model_serving/model_config/triton_config.py,sha256=VG6XjSGVP1pG5YTwfoWVKf4rpXDcf1dumHPaSdMi8bQ,5683
 clarifai/models/model_serving/models/__init__.py,sha256=Nls28G-fedNw2oQZIkPQSN__TgjJXbG9RDzzuHIM0VI,575
 clarifai/models/model_serving/models/inference.py,sha256=BpniZvGAVjMaJaBkZMQob3qqG1QKEA7px12WKzMBMeo,1639
-clarifai/models/model_serving/models/model_types.py,sha256=
-clarifai/models/model_serving/models/output.py,sha256=
-clarifai/models/model_serving/models/pb_model.py,sha256=
+clarifai/models/model_serving/models/model_types.py,sha256=MI9liI-Isg1LxQbtOLhILMmK4vQZi5DunEY6GmftIZc,7069
+clarifai/models/model_serving/models/output.py,sha256=jzF6QMUXnQIWAeyY3-7qGfrr8HtFCth5wr10eSPyFdI,3852
+clarifai/models/model_serving/models/pb_model.py,sha256=gLIP0dNXLD9lfBQmXEnGgjDo39QnCcu0PiFB37Wqxgg,1863
 clarifai/modules/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/modules/css.py,sha256=kadCEunmyh5h2yf0-4aysE3ZcZ6qaQcxuAgDXS96yF8,2020
 clarifai/modules/pages.py,sha256=iOoM3RNRMgXlV0qBqcdQofxoXo2RuRQh0h9c9BIS0-I,1383
-clarifai/modules/style.css,sha256=
+clarifai/modules/style.css,sha256=j7FNPZVhLPj35vvBksAJ90RuX5sLuqzDR5iM2WIEhiA,6073
 clarifai/runners/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/runners/api.py,sha256=ikjLONGzmkekjXoEvqXD9pv1kfh1OtHAbAh-3j-sfMc,3578
+clarifai/runners/base.py,sha256=s9xXgyyvKkVlA4ypGUVdMZFTziBC-RLeBHuoDLMdGD0,5713
+clarifai/runners/example.py,sha256=s_t2-7aZqXtV9MX7cZNIXJm0gSKCGd1bejbI3ag7a8o,1129
 clarifai/urls/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/urls/helper.py,sha256=RHDsZhXE_Vp4QGWmSjatn3j0k6Zu2O1b2gmvI-F_rHY,4276
 clarifai_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai_utils/auth/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-clarifai_utils/auth/helper.py,sha256=
+clarifai_utils/auth/helper.py,sha256=4ZxyH43qQOoHse_ypmcaUpbhV4BrZ3I61GhgbxF2_9Q,13921
 clarifai_utils/client/__init__.py,sha256=DlA1iS8Szl3I7oGw5UBVra7-dZ1M_wKxcZXCoUcDBJ8,121
 clarifai_utils/client/abc.py,sha256=lRAIhYSUwHh0Xp6yZWVWXqWF1mOy9A-8aKscmoPXEnM,536
 clarifai_utils/client/stub.py,sha256=pa1Sh9HeUG7Ipi3f-OmTRFp0hvH31PF60NsC7IwFBNI,3606
@@ -125,7 +136,7 @@ clarifai_utils/listing/modules.py,sha256=yM074edY2d19HHt2MpO6_oAzDxM8yuc4hMns4D8
 clarifai_utils/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai_utils/models/api.py,sha256=fsE6eLZ0NgcvDpAQcRpT2DPataBTg0gIEOWxtoNeYXM,9555
 clarifai_utils/models/model_serving/__init__.py,sha256=Nls28G-fedNw2oQZIkPQSN__TgjJXbG9RDzzuHIM0VI,575
-clarifai_utils/models/model_serving/constants.py,sha256=
+clarifai_utils/models/model_serving/constants.py,sha256=JUXeo3Nla9UMA9U9yG-nbOfcRdEAwl30lj13Rp11xcg,196
 clarifai_utils/models/model_serving/pb_model_repository.py,sha256=jtODfmEa-GrMlyFDNo_1jAYLuJTSy_IBd9BSpK12O8w,3411
 clarifai_utils/models/model_serving/cli/__init__.py,sha256=Nls28G-fedNw2oQZIkPQSN__TgjJXbG9RDzzuHIM0VI,575
 clarifai_utils/models/model_serving/cli/deploy_cli.py,sha256=PVoMUbM-EQ0GGzws7XpPr59IwdBawIl71CM2jOoh-C4,3825
@@ -137,28 +148,39 @@ clarifai_utils/models/model_serving/examples/image_classification/age_vit/1/mode
 clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/inference.py,sha256=jzdWLJQ8K6h-CFrmZju-DTuQNPSRXhJTS3zXdNaiA7A,2173
 clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/1/model.py,sha256=kC2gglV8QI8G4TNwjerDOO-Xm3UHtoMGeapLRpnKP8E,1941
+clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/inference.py,sha256=ekHKG926K3JOKUvFPdL0wuhPyTCpkCYsiY39YhchKaA,1847
+clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/1/model.py,sha256=gLIP0dNXLD9lfBQmXEnGgjDo39QnCcu0PiFB37Wqxgg,1863
 clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/1/inference.py,sha256=bF-hhNBIZTq5Fci182OkWnToAQMJz4W2tzRf9D4oIsQ,2906
 clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/1/model.py,sha256=kC2gglV8QI8G4TNwjerDOO-Xm3UHtoMGeapLRpnKP8E,1941
+clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/inference.py,sha256=P1JLU-N7IzYRzAjRlJjiqat0CF8M7Fzl_U6sfOBa2jM,1971
+clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/1/model.py,sha256=gLIP0dNXLD9lfBQmXEnGgjDo39QnCcu0PiFB37Wqxgg,1863
+clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/inference.py,sha256=Det16IapShTKwD6eEOFm7zczNF73ctoJ3sEb9EIY9nU,2119
+clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/1/model.py,sha256=gLIP0dNXLD9lfBQmXEnGgjDo39QnCcu0PiFB37Wqxgg,1863
 clarifai_utils/models/model_serving/model_config/__init__.py,sha256=Nls28G-fedNw2oQZIkPQSN__TgjJXbG9RDzzuHIM0VI,575
-clarifai_utils/models/model_serving/model_config/deploy.py,sha256=
+clarifai_utils/models/model_serving/model_config/deploy.py,sha256=4JJlor543ePredE88kR8vwvw25xpYb8ESctp7Jr7A_c,2801
 clarifai_utils/models/model_serving/model_config/serializer.py,sha256=rr5Bc7vuExT6L1hoBOaMNKeESQ-qH4TzPleopvdthR4,4231
-clarifai_utils/models/model_serving/model_config/triton_config.py,sha256=
+clarifai_utils/models/model_serving/model_config/triton_config.py,sha256=VG6XjSGVP1pG5YTwfoWVKf4rpXDcf1dumHPaSdMi8bQ,5683
 clarifai_utils/models/model_serving/models/__init__.py,sha256=Nls28G-fedNw2oQZIkPQSN__TgjJXbG9RDzzuHIM0VI,575
 clarifai_utils/models/model_serving/models/inference.py,sha256=BpniZvGAVjMaJaBkZMQob3qqG1QKEA7px12WKzMBMeo,1639
-clarifai_utils/models/model_serving/models/model_types.py,sha256=
-clarifai_utils/models/model_serving/models/output.py,sha256=
-clarifai_utils/models/model_serving/models/pb_model.py,sha256=
+clarifai_utils/models/model_serving/models/model_types.py,sha256=MI9liI-Isg1LxQbtOLhILMmK4vQZi5DunEY6GmftIZc,7069
+clarifai_utils/models/model_serving/models/output.py,sha256=jzF6QMUXnQIWAeyY3-7qGfrr8HtFCth5wr10eSPyFdI,3852
+clarifai_utils/models/model_serving/models/pb_model.py,sha256=gLIP0dNXLD9lfBQmXEnGgjDo39QnCcu0PiFB37Wqxgg,1863
 clarifai_utils/modules/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai_utils/modules/css.py,sha256=kadCEunmyh5h2yf0-4aysE3ZcZ6qaQcxuAgDXS96yF8,2020
 clarifai_utils/modules/pages.py,sha256=iOoM3RNRMgXlV0qBqcdQofxoXo2RuRQh0h9c9BIS0-I,1383
-clarifai_utils/modules/style.css,sha256=
+clarifai_utils/modules/style.css,sha256=j7FNPZVhLPj35vvBksAJ90RuX5sLuqzDR5iM2WIEhiA,6073
 clarifai_utils/runners/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai_utils/runners/api.py,sha256=ikjLONGzmkekjXoEvqXD9pv1kfh1OtHAbAh-3j-sfMc,3578
+clarifai_utils/runners/base.py,sha256=s9xXgyyvKkVlA4ypGUVdMZFTziBC-RLeBHuoDLMdGD0,5713
+clarifai_utils/runners/example.py,sha256=s_t2-7aZqXtV9MX7cZNIXJm0gSKCGd1bejbI3ag7a8o,1129
 clarifai_utils/urls/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai_utils/urls/helper.py,sha256=RHDsZhXE_Vp4QGWmSjatn3j0k6Zu2O1b2gmvI-F_rHY,4276
-clarifai-9.6.
-clarifai-9.6.
-clarifai-9.6.
-clarifai-9.6.
-clarifai-9.6.
-clarifai-9.6.
+clarifai-9.6.3.dist-info/LICENSE,sha256=GuQZ4iPZUwh44duTbVr7ZzYp_SaJDLR9MvzU7YqlZXM,555
+clarifai-9.6.3.dist-info/METADATA,sha256=OsPBVEV14RHZoCYHyfJFQEc7tTwqlb5SNAp09OE7upc,2985
+clarifai-9.6.3.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
+clarifai-9.6.3.dist-info/entry_points.txt,sha256=cna1vVlFIZZZlxHy1AbhooFGy-dw1W2xRfbOVRSWSKg,255
+clarifai-9.6.3.dist-info/top_level.txt,sha256=w3e8wx1HuK3_huGQosppv1_FSoNjBUd09KBKMK3wR-U,24
+clarifai-9.6.3.dist-info/RECORD,,
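Each RECORD entry pairs a path with sha256=<urlsafe base64 digest, padding stripped> and the file's size in bytes, which is why every changed file above gets a new hash and length. A small illustrative sketch, not from the package, of how such an entry is formed:

import base64
import hashlib
from pathlib import Path

def record_entry(path: str) -> str:
  # Same format as the RECORD lines above: path,sha256=<digest>,<size-in-bytes>.
  data = Path(path).read_bytes()
  digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
  return f"{path},sha256={digest},{len(data)}"

print(record_entry("clarifai/runners/base.py"))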
clarifai_utils/auth/helper.py
CHANGED
@@ -131,7 +131,7 @@ class ClarifaiAuthHelper:
 
     # Then add in the query params.
     try:
-      auth
+      auth.add_streamlit_query_params(st.experimental_get_query_params())
     except Exception as e:
       st.error(e)
       st.stop()
@@ -165,7 +165,7 @@ class ClarifaiAuthHelper:
     auth = ClarifaiAuthHelper("", "", "", "", validate=False)
 
     # Then add in the query params.
-    auth
+    auth.add_streamlit_query_params(query_params)
 
     # Then validate.
     auth.validate()
@@ -238,7 +238,7 @@ Additionally, these optional params are supported:
    """
    user_id = os.environ.get("CLARIFAI_USER_ID", "")
    app_id = os.environ.get("CLARIFAI_APP_ID", "")
-    token = os.environ("CLARIFAI_SESSION_TOKEN", "")
+    token = os.environ.get("CLARIFAI_SESSION_TOKEN", "")
    pat = os.environ.get("CLARIFAI_PAT", "")
    base = os.environ.get("CLARIFAI_API_BASE", DEFAULT_BASE)
    ui = os.environ.get("CLARIFAI_UI", DEFAULT_UI)
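The third hunk is the substantive fix: os.environ is a mapping, not a callable, so the 9.6.1 form raised TypeError whenever from_env() ran. A quick standalone illustration, not taken from the package:

import os

try:
  token = os.environ("CLARIFAI_SESSION_TOKEN", "")  # 9.6.1 form: TypeError, os.environ is not callable
except TypeError as err:
  print(err)

token = os.environ.get("CLARIFAI_SESSION_TOKEN", "")  # 9.6.3 form: falls back to "" when unset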
File without changes

@@ -0,0 +1,47 @@
+# This file contains boilerplate code to allow users to write their model
+# inference code that will then interact with the Triton Inference Server
+# Python backend to serve end user requests.
+# The module name, module path, class name & get_predictions() method names MUST be maintained as is
+# but other methods may be added within the class as deemed fit provided
+# they are invoked within the main get_predictions() inference method
+# if they play a role in any step of model inference
+"""User model inference script."""
+
+import os
+from pathlib import Path
+import numpy as np
+from transformers import pipeline
+
+from clarifai.models.model_serving.models.model_types import text_to_text
+from clarifai.models.model_serving.models.output import TextOutput
+
+
+class InferenceModel:
+  """User model inference class."""
+
+  def __init__(self) -> None:
+    """
+    Load inference time artifacts that are called frequently, e.g. models, tokenizers, etc.,
+    in this method so they are loaded only once for faster inference.
+    """
+    self.base_path: Path = os.path.dirname(__file__)
+    self.huggingface_model_path = os.path.join(self.base_path, "bart-large-summarizer")
+    self.pipeline = pipeline("summarization", model=self.huggingface_model_path)
+
+  @text_to_text
+  def get_predictions(self, input_data):
+    """
+    Generates summaries of input text.
+
+    Args:
+    -----
+      input_data: A single input data item to predict on.
+        Input data can be an image or text, etc. depending on the model type.
+
+    Returns:
+    --------
+      One of the clarifai.models.model_serving.models.output types. Refer to the README/docs.
+    """
+    summary = self.pipeline(input_data, max_length=50, min_length=30, do_sample=False)
+    generated_text = np.array([summary[0]['summary_text']], dtype=object)
+    return TextOutput(predicted_text=generated_text)
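This inference.py loads its summarization pipeline from a local "bart-large-summarizer" directory next to the script, so the checkpoint has to be placed there before the Triton model is packaged. A hedged setup sketch; the "facebook/bart-large-cnn" checkpoint name and the target path are assumptions for illustration, not something the wheel pins:

from transformers import pipeline

# Download a summarization checkpoint and save it where inference.py expects it.
summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
summarizer.save_pretrained("bart-summarize/1/bart-large-summarizer")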
@@ -0,0 +1,60 @@
+# Copyright 2023 Clarifai, Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Triton inference server Python Backend Model."""
+
+import os
+import sys
+
+try:
+  import triton_python_backend_utils as pb_utils
+except ModuleNotFoundError:
+  pass
+from google.protobuf import text_format
+from tritonclient.grpc.model_config_pb2 import ModelConfig
+
+
+class TritonPythonModel:
+  """
+  Triton Python BE Model.
+  """
+
+  def initialize(self, args):
+    """
+    Triton server init.
+    """
+    args["model_repository"] = args["model_repository"].replace("/1/model.py", "")
+    sys.path.append(os.path.dirname(__file__))
+    from inference import InferenceModel
+
+    self.inference_obj = InferenceModel()
+
+    # Read input_name from config file
+    self.config_msg = ModelConfig()
+    with open(os.path.join(args["model_repository"], "config.pbtxt"), "r") as f:
+      cfg = f.read()
+    text_format.Merge(cfg, self.config_msg)
+    self.input_name = [inp.name for inp in self.config_msg.input][0]
+
+  def execute(self, requests):
+    """
+    Serve model inference requests.
+    """
+    responses = []
+
+    for request in requests:
+      in_batch = pb_utils.get_input_tensor_by_name(request, self.input_name)
+      in_batch = in_batch.as_numpy()
+      inference_response = self.inference_obj.get_predictions(in_batch)
+      responses.append(inference_response)
+
+    return responses
File without changes

@@ -0,0 +1,51 @@
+# This file contains boilerplate code to allow users to write their model
+# inference code that will then interact with the Triton Inference Server
+# Python backend to serve end user requests.
+# The module name, module path, class name & get_predictions() method names MUST be maintained as is
+# but other methods may be added within the class as deemed fit provided
+# they are invoked within the main get_predictions() inference method
+# if they play a role in any step of model inference
+"""User model inference script."""
+
+import os
+from pathlib import Path
+
+import torch
+from transformers import AutoModel, ViTImageProcessor
+
+from clarifai.models.model_serving.models.model_types import visual_embedder
+from clarifai.models.model_serving.models.output import EmbeddingOutput
+
+
+class InferenceModel:
+  """User model inference class."""
+
+  def __init__(self) -> None:
+    """
+    Load inference time artifacts that are called frequently, e.g. models, tokenizers, etc.,
+    in this method so they are loaded only once for faster inference.
+    """
+    self.base_path: Path = os.path.dirname(__file__)
+    self.huggingface_model_path = os.path.join(self.base_path, "vit-base-patch16-224")
+    self.processor = ViTImageProcessor.from_pretrained(self.huggingface_model_path)
+    self.model = AutoModel.from_pretrained(self.huggingface_model_path)
+
+  @visual_embedder
+  def get_predictions(self, input_data):
+    """
+    Main model inference method.
+
+    Args:
+    -----
+      input_data: A single input data item to predict on.
+        Input data can be an image or text, etc. depending on the model type.
+
+    Returns:
+    --------
+      One of the clarifai.models.model_serving.models.output types. Refer to the README/docs.
+    """
+    inputs = self.processor(images=input_data, return_tensors="pt")
+    with torch.no_grad():
+      embedding_vector = self.model(**inputs).last_hidden_state[:, 0].cpu().numpy()
+
+    return EmbeddingOutput(embedding_vector=embedding_vector[0])
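As with the other examples, the ViT weights are loaded from a local "vit-base-patch16-224" directory beside inference.py. A hedged setup sketch; the "google/vit-base-patch16-224" source checkpoint and the target path are assumptions for illustration:

from transformers import AutoModel, ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
model = AutoModel.from_pretrained("google/vit-base-patch16-224")

# Save both pieces into the directory inference.py joins onto its own path.
processor.save_pretrained("vit-base/1/vit-base-patch16-224")
model.save_pretrained("vit-base/1/vit-base-patch16-224")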
@@ -0,0 +1,60 @@
+# Copyright 2023 Clarifai, Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Triton inference server Python Backend Model."""
+
+import os
+import sys
+
+try:
+  import triton_python_backend_utils as pb_utils
+except ModuleNotFoundError:
+  pass
+from google.protobuf import text_format
+from tritonclient.grpc.model_config_pb2 import ModelConfig
+
+
+class TritonPythonModel:
+  """
+  Triton Python BE Model.
+  """
+
+  def initialize(self, args):
+    """
+    Triton server init.
+    """
+    args["model_repository"] = args["model_repository"].replace("/1/model.py", "")
+    sys.path.append(os.path.dirname(__file__))
+    from inference import InferenceModel
+
+    self.inference_obj = InferenceModel()
+
+    # Read input_name from config file
+    self.config_msg = ModelConfig()
+    with open(os.path.join(args["model_repository"], "config.pbtxt"), "r") as f:
+      cfg = f.read()
+    text_format.Merge(cfg, self.config_msg)
+    self.input_name = [inp.name for inp in self.config_msg.input][0]
+
+  def execute(self, requests):
+    """
+    Serve model inference requests.
+    """
+    responses = []
+
+    for request in requests:
+      in_batch = pb_utils.get_input_tensor_by_name(request, self.input_name)
+      in_batch = in_batch.as_numpy()
+      inference_response = self.inference_obj.get_predictions(in_batch)
+      responses.append(inference_response)
+
+    return responses
File without changes

@@ -0,0 +1,55 @@
+# This file contains boilerplate code to allow users to write their model
+# inference code that will then interact with the Triton Inference Server
+# Python backend to serve end user requests.
+# The module name, module path, class name & get_predictions() method names MUST be maintained as is
+# but other methods may be added within the class as deemed fit provided
+# they are invoked within the main get_predictions() inference method
+# if they play a role in any step of model inference
+"""User model inference script."""
+
+import os
+from pathlib import Path
+
+import torch
+from transformers import AutoModelForSemanticSegmentation, SegformerImageProcessor
+
+from clarifai.models.model_serving.models.model_types import visual_segmenter
+from clarifai.models.model_serving.models.output import MasksOutput
+
+
+class InferenceModel:
+  """User model inference class."""
+
+  def __init__(self) -> None:
+    """
+    Load inference time artifacts that are called frequently, e.g. models, tokenizers, etc.,
+    in this method so they are loaded only once for faster inference.
+    """
+    self.base_path: Path = os.path.dirname(__file__)
+    self.huggingface_model_path = os.path.join(self.base_path, "segformer_b2_clothes")
+    #self.labels_path = os.path.join(Path(self.base_path).parents[0], "labels.txt")
+    self.processor = SegformerImageProcessor.from_pretrained(self.huggingface_model_path)
+    self.model = AutoModelForSemanticSegmentation.from_pretrained(self.huggingface_model_path)
+
+  @visual_segmenter
+  def get_predictions(self, input_data):
+    """
+    Main model inference method.
+
+    Args:
+    -----
+      input_data: A single input data item to predict on.
+        Input data can be an image or text, etc. depending on the model type.
+
+    Returns:
+    --------
+      One of the clarifai.models.model_serving.models.output types. Refer to the README/docs.
+    """
+    inputs = self.processor(images=input_data, return_tensors="pt")
+    with torch.no_grad():
+      output = self.model(**inputs)
+
+    logits = output.logits.cpu()
+    mask = logits.argmax(dim=1)[0].numpy()
+
+    return MasksOutput(predicted_mask=mask)
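The segmentation example likewise expects a local "segformer_b2_clothes" directory next to inference.py. A hedged setup sketch; the "mattmdjaga/segformer_b2_clothes" checkpoint name is only an assumption suggested by that directory name, and the target path is illustrative:

from transformers import AutoModelForSemanticSegmentation, SegformerImageProcessor

checkpoint = "mattmdjaga/segformer_b2_clothes"  # assumed source checkpoint, not confirmed by the wheel
processor = SegformerImageProcessor.from_pretrained(checkpoint)
model = AutoModelForSemanticSegmentation.from_pretrained(checkpoint)

# Save both pieces into the directory inference.py joins onto its own path.
processor.save_pretrained("segformer-b2/1/segformer_b2_clothes")
model.save_pretrained("segformer-b2/1/segformer_b2_clothes")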