clarifai 9.8.1__py3-none-any.whl → 9.9.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (124) hide show
  1. clarifai/client/app.py +115 -14
  2. clarifai/client/base.py +11 -4
  3. clarifai/client/dataset.py +8 -3
  4. clarifai/client/input.py +34 -28
  5. clarifai/client/model.py +71 -2
  6. clarifai/client/module.py +4 -2
  7. clarifai/client/runner.py +161 -0
  8. clarifai/client/search.py +173 -0
  9. clarifai/client/user.py +110 -4
  10. clarifai/client/workflow.py +27 -2
  11. clarifai/constants/search.py +2 -0
  12. clarifai/datasets/upload/loaders/xview_detection.py +1 -1
  13. clarifai/models/model_serving/README.md +3 -3
  14. clarifai/models/model_serving/cli/deploy_cli.py +2 -3
  15. clarifai/models/model_serving/cli/repository.py +3 -5
  16. clarifai/models/model_serving/constants.py +1 -5
  17. clarifai/models/model_serving/docs/custom_config.md +5 -6
  18. clarifai/models/model_serving/docs/dependencies.md +5 -10
  19. clarifai/models/model_serving/examples/image_classification/age_vit/requirements.txt +1 -0
  20. clarifai/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +1 -0
  21. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +1 -0
  22. clarifai/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +1 -0
  23. clarifai/models/model_serving/examples/visual_detection/yolov5x/requirements.txt +1 -1
  24. clarifai/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +1 -0
  25. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +1 -0
  26. clarifai/models/model_serving/model_config/__init__.py +2 -0
  27. clarifai/models/model_serving/model_config/config.py +298 -0
  28. clarifai/models/model_serving/model_config/model_types_config/text-classifier.yaml +18 -0
  29. clarifai/models/model_serving/model_config/model_types_config/text-embedder.yaml +18 -0
  30. clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml +18 -0
  31. clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml +18 -0
  32. clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml +18 -0
  33. clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml +28 -0
  34. clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml +18 -0
  35. clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +18 -0
  36. clarifai/models/model_serving/model_config/serializer.py +1 -1
  37. clarifai/models/model_serving/models/default_test.py +22 -21
  38. clarifai/models/model_serving/models/output.py +2 -2
  39. clarifai/models/model_serving/pb_model_repository.py +2 -5
  40. clarifai/runners/__init__.py +0 -0
  41. clarifai/runners/example.py +33 -0
  42. clarifai/schema/search.py +60 -0
  43. clarifai/utils/logging.py +53 -3
  44. clarifai/versions.py +1 -1
  45. clarifai/workflows/__init__.py +0 -0
  46. clarifai/workflows/export.py +68 -0
  47. clarifai/workflows/utils.py +59 -0
  48. clarifai/workflows/validate.py +67 -0
  49. {clarifai-9.8.1.dist-info → clarifai-9.9.0.dist-info}/METADATA +20 -2
  50. {clarifai-9.8.1.dist-info → clarifai-9.9.0.dist-info}/RECORD +102 -86
  51. clarifai_utils/client/app.py +115 -14
  52. clarifai_utils/client/base.py +11 -4
  53. clarifai_utils/client/dataset.py +8 -3
  54. clarifai_utils/client/input.py +34 -28
  55. clarifai_utils/client/model.py +71 -2
  56. clarifai_utils/client/module.py +4 -2
  57. clarifai_utils/client/runner.py +161 -0
  58. clarifai_utils/client/search.py +173 -0
  59. clarifai_utils/client/user.py +110 -4
  60. clarifai_utils/client/workflow.py +27 -2
  61. clarifai_utils/constants/search.py +2 -0
  62. clarifai_utils/datasets/upload/loaders/xview_detection.py +1 -1
  63. clarifai_utils/models/model_serving/README.md +3 -3
  64. clarifai_utils/models/model_serving/cli/deploy_cli.py +2 -3
  65. clarifai_utils/models/model_serving/cli/repository.py +3 -5
  66. clarifai_utils/models/model_serving/constants.py +1 -5
  67. clarifai_utils/models/model_serving/docs/custom_config.md +5 -6
  68. clarifai_utils/models/model_serving/docs/dependencies.md +5 -10
  69. clarifai_utils/models/model_serving/examples/image_classification/age_vit/requirements.txt +1 -0
  70. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/requirements.txt +1 -0
  71. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/requirements.txt +1 -0
  72. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/requirements.txt +1 -0
  73. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/requirements.txt +1 -1
  74. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/requirements.txt +1 -0
  75. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/requirements.txt +1 -0
  76. clarifai_utils/models/model_serving/model_config/__init__.py +2 -0
  77. clarifai_utils/models/model_serving/model_config/config.py +298 -0
  78. clarifai_utils/models/model_serving/model_config/model_types_config/text-classifier.yaml +18 -0
  79. clarifai_utils/models/model_serving/model_config/model_types_config/text-embedder.yaml +18 -0
  80. clarifai_utils/models/model_serving/model_config/model_types_config/text-to-image.yaml +18 -0
  81. clarifai_utils/models/model_serving/model_config/model_types_config/text-to-text.yaml +18 -0
  82. clarifai_utils/models/model_serving/model_config/model_types_config/visual-classifier.yaml +18 -0
  83. clarifai_utils/models/model_serving/model_config/model_types_config/visual-detector.yaml +28 -0
  84. clarifai_utils/models/model_serving/model_config/model_types_config/visual-embedder.yaml +18 -0
  85. clarifai_utils/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +18 -0
  86. clarifai_utils/models/model_serving/model_config/serializer.py +1 -1
  87. clarifai_utils/models/model_serving/models/default_test.py +22 -21
  88. clarifai_utils/models/model_serving/models/output.py +2 -2
  89. clarifai_utils/models/model_serving/pb_model_repository.py +2 -5
  90. clarifai_utils/runners/__init__.py +0 -0
  91. clarifai_utils/runners/example.py +33 -0
  92. clarifai_utils/schema/search.py +60 -0
  93. clarifai_utils/utils/logging.py +53 -3
  94. clarifai_utils/versions.py +1 -1
  95. clarifai_utils/workflows/__init__.py +0 -0
  96. clarifai_utils/workflows/export.py +68 -0
  97. clarifai_utils/workflows/utils.py +59 -0
  98. clarifai_utils/workflows/validate.py +67 -0
  99. clarifai/models/model_serving/envs/triton_conda-cp3.8-torch1.13.1-19f97078.yaml +0 -35
  100. clarifai/models/model_serving/envs/triton_conda-cp3.8-torch2.0.0-ce980f28.yaml +0 -51
  101. clarifai/models/model_serving/examples/image_classification/age_vit/triton_conda.yaml +0 -1
  102. clarifai/models/model_serving/examples/text_classification/xlm-roberta/triton_conda.yaml +0 -1
  103. clarifai/models/model_serving/examples/text_to_image/sd-v1.5/triton_conda.yaml +0 -1
  104. clarifai/models/model_serving/examples/text_to_text/bart-summarize/triton_conda.yaml +0 -1
  105. clarifai/models/model_serving/examples/visual_detection/yolov5x/triton_conda.yaml +0 -1
  106. clarifai/models/model_serving/examples/visual_embedding/vit-base/triton_conda.yaml +0 -1
  107. clarifai/models/model_serving/examples/visual_segmentation/segformer-b2/triton_conda.yaml +0 -1
  108. clarifai/models/model_serving/model_config/deploy.py +0 -75
  109. clarifai/models/model_serving/model_config/triton_config.py +0 -226
  110. clarifai_utils/models/model_serving/envs/triton_conda-cp3.8-torch1.13.1-19f97078.yaml +0 -35
  111. clarifai_utils/models/model_serving/envs/triton_conda-cp3.8-torch2.0.0-ce980f28.yaml +0 -51
  112. clarifai_utils/models/model_serving/examples/image_classification/age_vit/triton_conda.yaml +0 -1
  113. clarifai_utils/models/model_serving/examples/text_classification/xlm-roberta/triton_conda.yaml +0 -1
  114. clarifai_utils/models/model_serving/examples/text_to_image/sd-v1.5/triton_conda.yaml +0 -1
  115. clarifai_utils/models/model_serving/examples/text_to_text/bart-summarize/triton_conda.yaml +0 -1
  116. clarifai_utils/models/model_serving/examples/visual_detection/yolov5x/triton_conda.yaml +0 -1
  117. clarifai_utils/models/model_serving/examples/visual_embedding/vit-base/triton_conda.yaml +0 -1
  118. clarifai_utils/models/model_serving/examples/visual_segmentation/segformer-b2/triton_conda.yaml +0 -1
  119. clarifai_utils/models/model_serving/model_config/deploy.py +0 -75
  120. clarifai_utils/models/model_serving/model_config/triton_config.py +0 -226
  121. {clarifai-9.8.1.dist-info → clarifai-9.9.0.dist-info}/LICENSE +0 -0
  122. {clarifai-9.8.1.dist-info → clarifai-9.9.0.dist-info}/WHEEL +0 -0
  123. {clarifai-9.8.1.dist-info → clarifai-9.9.0.dist-info}/entry_points.txt +0 -0
  124. {clarifai-9.8.1.dist-info → clarifai-9.9.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,161 @@
1
+ # Copyright 2023 Clarifai, Inc.
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ """Interface to Clarifai Runners API."""
14
+
15
+ import os
16
+
17
+ from clarifai_grpc.grpc.api import resources_pb2, service_pb2
18
+ from clarifai_grpc.grpc.api.status import status_code_pb2, status_pb2
19
+ from google.protobuf import json_format
20
+
21
+ from clarifai.client.base import BaseClient
22
+ from clarifai.errors import UserError
23
+ from clarifai.utils.logging import get_logger
24
+
25
+
26
+ class Runner(BaseClient):
27
+ """Base class for remote inference runners. This should be subclassed with the run_input method
28
+ implemented to process each input in the request.
29
+
30
+ Then on the subclass call start() to start the run loop.
31
+ """
32
+
33
+ def __init__(self,
34
+ runner_id: str,
35
+ user_id: str = "",
36
+ check_runner_exists: bool = True,
37
+ base_url: str = "https://api.clarifai.com",
38
+ **kwargs) -> None:
39
+ """
40
+ Args:
41
+ runner_id (str): the id of the runner to use. Create the runner in the Clarifai API first
42
+ user_id (str): Clarifai User ID
43
+ base_url (str): Base API url. Default "https://api.clarifai.com"
44
+ """
45
+ user_id = user_id or os.environ.get("CLARIFAI_USER_ID", "")
46
+
47
+ if user_id == "":
48
+ raise UserError(
49
+ "Set CLARIFAI_USER_ID as environment variables or pass user_id as input arguments")
50
+
51
+ self.runner_id = runner_id
52
+ self.logger = get_logger("INFO", __name__)
53
+ self.kwargs = {**kwargs, 'id': runner_id, 'user_id': user_id}
54
+ self.runner_info = resources_pb2.Runner(**self.kwargs)
55
+ BaseClient.__init__(self, user_id=self.user_id, app_id="", base=base_url)
56
+
57
+ # Check that the runner exists.
58
+ if check_runner_exists:
59
+ request = service_pb2.GetRunnerRequest(user_app_id=self.user_app_id, runner_id=runner_id)
60
+ response = self._grpc_request(self.STUB.GetRunner, request)
61
+ if response.status.code != status_code_pb2.SUCCESS:
62
+ raise Exception(
63
+ f"""Error getting runner, are you sure this is a valid runner id {runner_id} at the user_id
64
+ {self.user_app_id.user_id}.
65
+ Error: {response.status.description}""")
66
+
67
+ def start(self):
68
+ """Start the run loop. This will ask the Clarifai API for work, and when it gets work, it will run
69
+ the model on the inputs and post the results back to the Clarifai API. It will then ask for more
70
+ work again.
71
+ """
72
+ self._long_poll_loop()
73
+
74
+ def _run(self, request: service_pb2.PostModelOutputsRequest) -> service_pb2.MultiOutputResponse:
75
+ """Run the model on the given request. You shouldn't need to override this method, see run_input
76
+ for the implementation to process each input in the request.
77
+
78
+ Args:
79
+ request: service_pb2.PostModelOutputsRequest - the request to run the model on
80
+
81
+ Returns:
82
+ service_pb2.MultiOutputResponse - the response from the model's run_input implementation.
83
+ """
84
+ outputs = []
85
+ # TODO: parallelize this
86
+ for inp in request.inputs:
87
+ # TODO: handle errors
88
+ outputs.append(self.run_input(inp))
89
+
90
+ return service_pb2.MultiOutputResponse(
91
+ status=status_pb2.Status(
92
+ code=status_code_pb2.SUCCESS,
93
+ description="Success",
94
+ ),
95
+ outputs=outputs,
96
+ )
97
+
98
+ def run_input(self, input: resources_pb2.Input) -> resources_pb2.Output:
99
+ """Run the model on the given input in the request. This is the method you should override to
100
+ process each input in the request.
101
+
102
+ Args:
103
+ input: resources_pb2.Input - the input to run the model on
104
+
105
+ Returns:
106
+ resources_pb2.Output - the response from the model's run_input implementation.
107
+ """
108
+ raise NotImplementedError("run_input() not implemented")
109
+
110
+ def _long_poll_loop(self):
111
+ """This method will long poll for work, and when it gets work, it will run the model on the inputs
112
+ and post the results back to the Clarifai API. It will then long poll again for more work.
113
+ """
114
+ c = 0
115
+ # TODO: handle more errors within this loop so it never stops.
116
+ # TODO: perhaps have multiple processes running this loop to handle more work.
117
+ while True:
118
+ # Long poll waiting for work.
119
+ self.logger.info("Loop iteration: {}".format(c))
120
+ request = service_pb2.ListRunnerItemsRequest(
121
+ user_app_id=self.user_app_id, runner_id=self.runner_id)
122
+ work_response = self._grpc_request(self.STUB.ListRunnerItems, request)
123
+ if work_response.status.code == status_code_pb2.RUNNER_NEEDS_RETRY:
124
+ c += 1
125
+ continue # immediately restart the long poll
126
+ if work_response.status.code != status_code_pb2.SUCCESS:
127
+ raise Exception("Error getting work: {}".format(work_response.status.description))
128
+ if len(work_response.items) == 0:
129
+ self.logger.info("No work to do. Waiting...")
130
+ continue
131
+
132
+ # We have work to do. Run the model on the inputs.
133
+ for item in work_response.items:
134
+ if not item.HasField('post_model_outputs_request'):
135
+ raise Exception("Unexpected work item type: {}".format(item))
136
+ self.logger.info(
137
+ f"Working on item: {item.id} with inputs {len(item.post_model_outputs_request.inputs)}"
138
+ )
139
+ result = self._run(item.post_model_outputs_request)
140
+
141
+ request = service_pb2.PostRunnerItemOutputsRequest(
142
+ user_app_id=self.user_app_id,
143
+ item_id=item.id,
144
+ runner_id=self.runner_id,
145
+ runner_item_outputs=[service_pb2.RunnerItemOutput(multi_output_response=result)])
146
+ result_response = self._grpc_request(self.STUB.PostRunnerItemOutputs, request)
147
+ if result_response.status.code != status_code_pb2.SUCCESS:
148
+ raise Exception(
149
+ json_format.MessageToJson(result_response, preserving_proto_field_name=True))
150
+ # raise Exception("Error posting result: {}".format(result_response.status.description))
151
+
152
+ def __getattr__(self, name):
153
+ return getattr(self.runner_info, name)
154
+
155
+ def __str__(self):
156
+ init_params = [param for param in self.kwargs.keys()]
157
+ attribute_strings = [
158
+ f"{param}={getattr(self.runner_info, param)}" for param in init_params
159
+ if hasattr(self.runner_info, param)
160
+ ]
161
+ return f"Runner Details: \n{', '.join(attribute_strings)}\n"
@@ -0,0 +1,173 @@
1
+ from typing import Any, Callable, Dict, Generator
2
+
3
+ from clarifai_grpc.grpc.api import resources_pb2, service_pb2
4
+ from clarifai_grpc.grpc.api.status import status_code_pb2
5
+ from google.protobuf.json_format import MessageToDict
6
+ from google.protobuf.struct_pb2 import Struct
7
+ from schema import SchemaError
8
+
9
+ from clarifai.client.base import BaseClient
10
+ from clarifai.client.input import Inputs
11
+ from clarifai.client.lister import Lister
12
+ from clarifai.constants.search import DEFAULT_SEARCH_METRIC, DEFAULT_TOP_K
13
+ from clarifai.errors import UserError
14
+ from clarifai.schema.search import get_schema
15
+
16
+
17
+ class Search(Lister, BaseClient):
18
+
19
+ def __init__(self,
20
+ user_id,
21
+ app_id,
22
+ top_k: int = DEFAULT_TOP_K,
23
+ metric: str = DEFAULT_SEARCH_METRIC):
24
+ """Initialize the Search object.
25
+
26
+ Args:
27
+ user_id (str): User ID.
28
+ app_id (str): App ID.
29
+ top_k (int, optional): Top K results to retrieve. Defaults to 10.
30
+ metric (str, optional): Similarity metric (either 'cosine' or 'euclidean'). Defaults to 'cosine'.
31
+
32
+ Raises:
33
+ UserError: If the metric is not 'cosine' or 'euclidean'.
34
+ """
35
+ if metric not in ["cosine", "euclidean"]:
36
+ raise UserError("Metric should be either cosine or euclidean")
37
+
38
+ self.user_id = user_id
39
+ self.app_id = app_id
40
+ self.metric_distance = dict(cosine="COSINE_DISTANCE", euclidean="EUCLIDEAN_DISTANCE")[metric]
41
+ self.data_proto = resources_pb2.Data()
42
+
43
+ self.inputs = Inputs(user_id=self.user_id, app_id=self.app_id)
44
+ self.rank_filter_schema = get_schema()
45
+ BaseClient.__init__(self, user_id=self.user_id, app_id=self.app_id)
46
+ Lister.__init__(self, page_size=top_k)
47
+
48
+ def _get_annot_proto(self, **kwargs):
49
+ """Get an Annotation proto message based on keyword arguments.
50
+
51
+ Args:
52
+ **kwargs: Keyword arguments specifying the resource.
53
+
54
+ Returns:
55
+ resources_pb2.Annotation: An Annotation proto message.
56
+ """
57
+ if not kwargs:
58
+ return resources_pb2.Annotation()
59
+
60
+ self.data_proto = resources_pb2.Data()
61
+ for key, value in kwargs.items():
62
+ if key == "image_bytes":
63
+ image_proto = self.inputs.get_input_from_bytes("", image_bytes=value).data.image
64
+ self.data_proto.image.CopyFrom(image_proto)
65
+
66
+ elif key == "image_url":
67
+ image_proto = self.inputs.get_input_from_url("", image_url=value).data.image
68
+ self.data_proto.image.CopyFrom(image_proto)
69
+
70
+ elif key == "concepts":
71
+ for concept in value:
72
+ concept_proto = resources_pb2.Concept(**concept)
73
+ self.data_proto.concepts.add().CopyFrom(concept_proto)
74
+
75
+ elif key == "text_raw":
76
+ text_proto = self.inputs.get_input_from_bytes(
77
+ "", text_bytes=bytes(value, 'utf-8')).data.text
78
+ self.data_proto.text.CopyFrom(text_proto)
79
+
80
+ elif key == "metadata":
81
+ metadata_struct = Struct()
82
+ metadata_struct.update(value)
83
+ self.data_proto.metadata.CopyFrom(metadata_struct)
84
+
85
+ elif key == "geo_point":
86
+ geo_point_proto = self._get_geo_point_proto(value["longitude"], value["latitude"],
87
+ value["geo_limit"])
88
+ self.data_proto.geo.CopyFrom(geo_point_proto)
89
+
90
+ else:
91
+ raise UserError(f"kwargs contain key that is not supported: {key}")
92
+ return resources_pb2.Annotation(data=self.data_proto)
93
+
94
+ def _get_geo_point_proto(self, longitude: float, latitude: float,
95
+ geo_limit: float) -> resources_pb2.Geo:
96
+ """Get a GeoPoint proto message based on geographical data.
97
+
98
+ Args:
99
+ longitude (float): Longitude coordinate.
100
+ latitude (float): Latitude coordinate.
101
+ geo_limit (float): Geographical limit.
102
+
103
+ Returns:
104
+ resources_pb2.Geo: A Geo proto message.
105
+ """
106
+ return resources_pb2.Geo(
107
+ geo_point=resources_pb2.GeoPoint(longitude=longitude, latitude=latitude),
108
+ geo_limit=resources_pb2.GeoLimit(type="withinKilometers", value=geo_limit))
109
+
110
+ def list_all_pages_generator(
111
+ self, endpoint: Callable[..., Any], proto_message: Any,
112
+ request_data: Dict[str, Any]) -> Generator[Dict[str, Any], None, None]:
113
+ """Lists all pages of a resource.
114
+
115
+ Args:
116
+ endpoint (Callable): The endpoint to call.
117
+ proto_message (Any): The proto message to use.
118
+ request_data (dict): The request data to use.
119
+
120
+ Yields:
121
+ response_dict: The next item in the listing.
122
+ """
123
+ page = 1
124
+ request_data['pagination'] = service_pb2.Pagination(page=page, per_page=self.default_page_size)
125
+ while True:
126
+ request_data['pagination'].page = page
127
+ response = self._grpc_request(endpoint, proto_message(**request_data))
128
+ dict_response = MessageToDict(response, preserving_proto_field_name=True)
129
+ if response.status.code != status_code_pb2.SUCCESS:
130
+ raise Exception(f"Listing failed with response {response!r}")
131
+
132
+ if 'hits' not in list(dict_response.keys()):
133
+ break
134
+ page += 1
135
+ yield response
136
+
137
+ def query(self, ranks=[{}], filters=[{}]):
138
+ """Perform a query with rank and filters.
139
+
140
+ Args:
141
+ ranks (List[Dict], optional): List of rank parameters. Defaults to [{}].
142
+ filters (List[Dict], optional): List of filter parameters. Defaults to [{}].
143
+
144
+ Returns:
145
+ Generator[Dict[str, Any], None, None]: A generator of query results.
146
+ """
147
+ try:
148
+ self.rank_filter_schema.validate(ranks)
149
+ self.rank_filter_schema.validate(filters)
150
+ except SchemaError as err:
151
+ raise UserError(f"Invalid rank or filter input: {err}")
152
+
153
+ rank_annot_proto, filters_annot_proto = [], []
154
+ for rank_dict in ranks:
155
+ rank_annot_proto.append(self._get_annot_proto(**rank_dict))
156
+ for filter_dict in filters:
157
+ filters_annot_proto.append(self._get_annot_proto(**filter_dict))
158
+
159
+ all_ranks = [resources_pb2.Rank(annotation=rank_annot) for rank_annot in rank_annot_proto]
160
+ all_filters = [
161
+ resources_pb2.Filter(annotation=filter_annot) for filter_annot in filters_annot_proto
162
+ ]
163
+
164
+ request_data = dict(
165
+ user_app_id=self.user_app_id,
166
+ searches=[
167
+ resources_pb2.Search(
168
+ query=resources_pb2.Query(ranks=all_ranks, filters=all_filters),
169
+ metric=self.metric_distance)
170
+ ])
171
+
172
+ return self.list_all_pages_generator(self.STUB.PostAnnotationsSearches,
173
+ service_pb2.PostAnnotationsSearchesRequest, request_data)
clarifai/client/user.py CHANGED
@@ -1,28 +1,32 @@
1
1
  from typing import Any, Dict, List
2
2
 
3
- from clarifai_grpc.grpc.api import resources_pb2, service_pb2 # noqa: F401
3
+ from clarifai_grpc.grpc.api import resources_pb2, service_pb2
4
4
  from clarifai_grpc.grpc.api.status import status_code_pb2
5
+ from google.protobuf.json_format import MessageToDict
5
6
 
6
7
  from clarifai.client.app import App
7
8
  from clarifai.client.base import BaseClient
8
9
  from clarifai.client.lister import Lister
10
+ from clarifai.client.runner import Runner
11
+ from clarifai.errors import UserError
9
12
  from clarifai.utils.logging import get_logger
10
13
 
11
14
 
12
15
  class User(Lister, BaseClient):
13
16
  """User is a class that provides access to Clarifai API endpoints related to user information."""
14
17
 
15
- def __init__(self, user_id: str = "", **kwargs):
18
+ def __init__(self, user_id: str = "", base_url: str = "https://api.clarifai.com", **kwargs):
16
19
  """Initializes an User object.
17
20
 
18
21
  Args:
19
22
  user_id (str): The user ID for the user to interact with.
20
- **kwargs: Additional keyword arguments to be passed to the ClarifaiAuthHelper.
23
+ base_url (str): Base API url. Default "https://api.clarifai.com"
24
+ **kwargs: Additional keyword arguments to be passed to the User.
21
25
  """
22
26
  self.kwargs = {**kwargs, 'id': user_id}
23
27
  self.user_info = resources_pb2.User(**self.kwargs)
24
28
  self.logger = get_logger(logger_level="INFO", name=__name__)
25
- BaseClient.__init__(self, user_id=self.id, app_id="")
29
+ BaseClient.__init__(self, user_id=self.id, app_id="", base=base_url)
26
30
  Lister.__init__(self)
27
31
 
28
32
  def list_apps(self, filter_by: Dict[str, Any] = {}) -> List[App]:
@@ -45,6 +49,27 @@ class User(Lister, BaseClient):
45
49
 
46
50
  return [App(**app_info) for app_info in all_apps_info]
47
51
 
52
+ def list_runners(self, filter_by: Dict[str, Any] = {}) -> List[Runner]:
53
+ """List all runners for the user
54
+
55
+ Args:
56
+ filter_by (dict): A dictionary of filters to apply to the list of runners.
57
+
58
+ Returns:
59
+ List[Runner]: A list of Runner objects for the runners.
60
+
61
+ Example:
62
+ >>> from clarifai.client.user import User
63
+ >>> client = User(user_id="user_id")
64
+ >>> all_runners= client.list_runners()
65
+ """
66
+ request_data = dict(user_app_id=self.user_app_id, per_page=self.default_page_size, **filter_by)
67
+ all_runners_info = list(
68
+ self.list_all_pages_generator(self.STUB.ListRunners, service_pb2.ListRunnersRequest,
69
+ request_data))
70
+
71
+ return [Runner(check_runner_exists=False, **runner_info) for runner_info in all_runners_info]
72
+
48
73
  def create_app(self, app_id: str, base_workflow: str = 'Language-Understanding',
49
74
  **kwargs) -> App:
50
75
  """Creates an app for the user.
@@ -73,6 +98,41 @@ class User(Lister, BaseClient):
73
98
  kwargs.update({'user_id': self.id})
74
99
  return App(app_id=app_id, **kwargs)
75
100
 
101
+ def create_runner(self, runner_id: str, labels: List[str], description: str) -> Runner:
102
+ """Create a runner
103
+
104
+ Args:
105
+ runner_id (str): The Id of runner to create
106
+ labels (List[str]): Labels to match runner
107
+ description (str): Description of Runner
108
+
109
+ Returns:
110
+ Runner: A runner object for the specified Runner ID
111
+
112
+ Example:
113
+ >>> from clarifai.client.user import User
114
+ >>> client = User(user_id="user_id")
115
+ >>> runner = client.create_runner(runner_id="runner_id", labels=["label to link runner"], description="laptop runner")
116
+ """
117
+ if not isinstance(labels, List):
118
+ raise UserError("Labels must be a List of strings")
119
+
120
+ request = service_pb2.PostRunnersRequest(
121
+ user_app_id=self.user_app_id,
122
+ runners=[resources_pb2.Runner(id=runner_id, labels=labels, description=description)])
123
+ response = self._grpc_request(self.STUB.PostRunners, request)
124
+
125
+ if response.status.code != status_code_pb2.SUCCESS:
126
+ raise Exception(response.status)
127
+ self.logger.info("\nRunner created\n%s", response.status)
128
+
129
+ return Runner(
130
+ runner_id=runner_id,
131
+ user_id=self.id,
132
+ labels=labels,
133
+ description=description,
134
+ check_runner_exists=False)
135
+
76
136
  def app(self, app_id: str, **kwargs) -> App:
77
137
  """Returns an App object for the specified app ID.
78
138
 
@@ -96,6 +156,34 @@ class User(Lister, BaseClient):
96
156
  kwargs['user_id'] = self.id
97
157
  return App(app_id=app_id, **kwargs)
98
158
 
159
+ def runner(self, runner_id: str) -> Runner:
160
+ """Returns a Runner object if exists.
161
+
162
+ Args:
163
+ runner_id (str): The runner ID to interact with
164
+
165
+ Returns:
166
+ Runner: A Runner object for the existing runner ID.
167
+
168
+ Example:
169
+ >>> from clarifai.client.user import User
170
+ >>> client = User(user_id="user_id")
171
+ >>> runner = client.runner(runner_id="runner_id")
172
+ """
173
+ request = service_pb2.GetRunnerRequest(user_app_id=self.user_app_id, runner_id=runner_id)
174
+ response = self._grpc_request(self.STUB.GetRunner, request)
175
+ if response.status.code != status_code_pb2.SUCCESS:
176
+ raise Exception(
177
+ f"""Error getting runner, are you use this is a valid runner id {runner_id} at the user_id/app_id
178
+ {self.user_app_id.user_id}/{self.user_app_id.app_id}.
179
+ Error: {response.status.description}""")
180
+
181
+ dict_response = MessageToDict(response, preserving_proto_field_name=True)
182
+ kwargs = self.process_response_keys(dict_response[list(dict_response.keys())[1]],
183
+ list(dict_response.keys())[1])
184
+
185
+ return Runner(check_runner_exists=False, **kwargs)
186
+
99
187
  def delete_app(self, app_id: str) -> None:
100
188
  """Deletes an app for the user.
101
189
 
@@ -113,6 +201,24 @@ class User(Lister, BaseClient):
113
201
  raise Exception(response.status)
114
202
  self.logger.info("\nApp Deleted\n%s", response.status)
115
203
 
204
+ def delete_runner(self, runner_id: str) -> None:
205
+ """Deletes the specified runner id
206
+
207
+ Args:
208
+ runner_id (str): The Id of the runner to delete
209
+
210
+ Example:
211
+ >>> from clarifai.client.user import User
212
+ >>> client = User(user_id="user_id")
213
+ >>> client.delete_runner(runner_id="runner_id")
214
+ """
215
+ request = service_pb2.DeleteRunnersRequest(user_app_id=self.user_app_id, ids=[runner_id])
216
+ response = self._grpc_request(self.STUB.DeleteRunners, request)
217
+
218
+ if response.status.code != status_code_pb2.SUCCESS:
219
+ raise Exception(response.status)
220
+ self.logger.info("\nRunner Deleted\n%s", response.status)
221
+
116
222
  def __getattr__(self, name):
117
223
  return getattr(self.user_info, name)
118
224
 
@@ -10,6 +10,7 @@ from clarifai.client.lister import Lister
10
10
  from clarifai.errors import UserError
11
11
  from clarifai.urls.helper import ClarifaiUrlHelper
12
12
  from clarifai.utils.logging import get_logger
13
+ from clarifai.workflows.export import Exporter
13
14
 
14
15
 
15
16
  class Workflow(Lister, BaseClient):
@@ -20,6 +21,7 @@ class Workflow(Lister, BaseClient):
20
21
  workflow_id: str = "",
21
22
  workflow_version: Dict = {'id': ""},
22
23
  output_config: Dict = {'min_value': 0},
24
+ base_url: str = "https://api.clarifai.com",
23
25
  **kwargs):
24
26
  """Initializes a Workflow object.
25
27
 
@@ -32,7 +34,8 @@ class Workflow(Lister, BaseClient):
32
34
  max_concepts (int): The maximum number of concepts to return.
33
35
  select_concepts (list[Concept]): The concepts to select.
34
36
  sample_ms (int): The number of milliseconds to sample.
35
- **kwargs: Additional keyword arguments to be passed to the ClarifaiAuthHelper.
37
+ base_url (str): Base API url. Default "https://api.clarifai.com"
38
+ **kwargs: Additional keyword arguments to be passed to the Workflow.
36
39
  """
37
40
  if url_init != "" and workflow_id != "":
38
41
  raise UserError("You can only specify one of url_init or workflow_id.")
@@ -47,7 +50,7 @@ class Workflow(Lister, BaseClient):
47
50
  self.output_config = output_config
48
51
  self.workflow_info = resources_pb2.Workflow(**self.kwargs)
49
52
  self.logger = get_logger(logger_level="INFO")
50
- BaseClient.__init__(self, user_id=self.user_id, app_id=self.app_id)
53
+ BaseClient.__init__(self, user_id=self.user_id, app_id=self.app_id, base=base_url)
51
54
  Lister.__init__(self)
52
55
 
53
56
  def predict(self, inputs: List[Input]):
@@ -182,6 +185,28 @@ class Workflow(Lister, BaseClient):
182
185
  for workflow_version_info in all_workflow_versions_info
183
186
  ]
184
187
 
188
+ def export(self, out_path: str):
189
+ """Exports the workflow to a yaml file.
190
+
191
+ Args:
192
+ out_path (str): The path to save the yaml file to.
193
+
194
+ Example:
195
+ >>> from clarifai.client.workflow import Workflow
196
+ >>> workflow = Workflow("https://clarifai.com/clarifai/main/workflows/Demographics")
197
+ >>> workflow.export('out_path.yml')
198
+ """
199
+ request = service_pb2.GetWorkflowRequest(user_app_id=self.user_app_id, workflow_id=self.id)
200
+ response = self._grpc_request(self.STUB.GetWorkflow, request)
201
+ if response.status.code != status_code_pb2.SUCCESS:
202
+ raise Exception(f"Workflow Export failed with response {response.status!r}")
203
+
204
+ with Exporter(response) as e:
205
+ e.parse()
206
+ e.export(out_path)
207
+
208
+ self.logger.info(f"Exported workflow to {out_path}")
209
+
185
210
  def __getattr__(self, name):
186
211
  return getattr(self.workflow_info, name)
187
212
 
@@ -0,0 +1,2 @@
1
+ DEFAULT_TOP_K = 10
2
+ DEFAULT_SEARCH_METRIC = "cosine"
@@ -42,7 +42,7 @@ class xviewDetectionDataLoader(ClarifaiDataLoader):
42
42
  self.img_comp_dir = os.path.join(self.data_dir, "train_images_comp")
43
43
  self.label_file = os.path.join(self.data_dir, "xview_train.geojson")
44
44
 
45
- # self.preprocess()
45
+ self.preprocess()
46
46
  self.all_data = self.xview_data_parser()
47
47
 
48
48
  self.load_data()
@@ -16,10 +16,10 @@ $ clarifai-model-upload-init --model_name <Your model name> \
16
16
  3. Add your model loading and inference code inside `inference.py` script of the generated model repository under the `setup()` and `predict()` functions respectively. Refer to the [Inference Script section]() for a description of this file.
17
17
  4. Testing your implementation locally by running `<your_triton_folder>/1/test.py` with basic predefined tests.
18
18
  To avoid missing dependencies when deploying, we recommend using conda to create a clean environment from [Clarifai base envs](./envs/). Then install everything in `requirements.txt`. Follow the instructions inside [test.py](./models/test.py) for implementing custom tests.
19
- * Create conda env: The base envs are attached in [./envs](./envs/), these are yaml file named as `triton_conda-python_version-torch_version-xxxx.yaml` format. Make sure base env that you're about to create is matched the one in your_triton_folder/triton_conda.yaml. To create conda env and install requirements run:
19
+ * Create conda env and install requirements:
20
20
  ```bash
21
- # create env
22
- conda env create -n <your_env> -f <base env name>.yaml
21
+ # create env (note: only python version 3.8 is supported currently)
22
+ conda create -n <your_env> python=3.8
23
23
  # activate it
24
24
  conda activate <your_env>
25
25
  # install dependencies
@@ -15,8 +15,7 @@ import argparse
15
15
 
16
16
  from clarifai.auth.helper import ClarifaiAuthHelper
17
17
  from clarifai.models.api import Models
18
- from clarifai.models.model_serving.constants import MODEL_TYPES
19
- from clarifai.models.model_serving.model_config.deploy import ClarifaiFieldsMap
18
+ from clarifai.models.model_serving.model_config import MODEL_TYPES, get_model_config
20
19
 
21
20
 
22
21
  def deploy(model_url,
@@ -50,7 +49,7 @@ def deploy(model_url,
50
49
  assert model_type, "Can not parse model_type from url, please input it directly"
51
50
  # key map
52
51
  assert model_type in MODEL_TYPES, f"model_type should be one of {MODEL_TYPES}"
53
- clarifai_key_map = ClarifaiFieldsMap(model_type=model_type)
52
+ clarifai_key_map = get_model_config(model_type=model_type).field_maps
54
53
  # if updating new version of existing model
55
54
  if update_version:
56
55
  resp = model_api.post_model_version(
@@ -14,8 +14,8 @@
14
14
 
15
15
  import argparse
16
16
 
17
- from ..constants import MODEL_TYPES
18
- from ..model_config.triton_config import TritonModelConfig
17
+ from ..constants import MAX_HW_DIM
18
+ from ..model_config import MODEL_TYPES, get_model_config
19
19
  from ..pb_model_repository import TritonModelRepository
20
20
 
21
21
 
@@ -63,7 +63,6 @@ def model_upload_init():
63
63
  help="Directory to create triton repository.")
64
64
 
65
65
  args = parser.parse_args()
66
- MAX_HW_DIM = 1024
67
66
 
68
67
  if len(args.image_shape) != 2:
69
68
  raise ValueError(
@@ -75,10 +74,9 @@ def model_upload_init():
75
74
  f"H and W each have a maximum value of 1024. Got H: {args.image_shape[0]}, W: {args.image_shape[1]}"
76
75
  )
77
76
 
78
- model_config = TritonModelConfig(
77
+ model_config = get_model_config(args.model_type).make_triton_model_config(
79
78
  model_name=args.model_name,
80
79
  model_version="1",
81
- model_type=args.model_type,
82
80
  image_shape=args.image_shape,
83
81
  )
84
82
 
@@ -1,5 +1 @@
1
- # Clarifai model types
2
- MODEL_TYPES = [
3
- "visual-detector", "visual-classifier", "text-classifier", "text-to-text", "text-embedder",
4
- "text-to-image", "visual-embedder", "visual-segmenter"
5
- ]
1
+ MAX_HW_DIM = 1024
@@ -15,17 +15,16 @@ $ clarifai-model-upload-init --model_name <Your model name> \
15
15
 
16
16
  ## Generating the triton model repository without the commandline
17
17
 
18
- The triton model repository can be generated via a python script specifying the same values as required in the commandline. Below is a sample of how the code would be structured.
18
+ The triton model repository can be generated via a python script specifying the same values as required in the commandline. Below is a sample of how the code would be structured with `visual_classifier`.
19
19
 
20
20
  ```python
21
- from clarifai.models.model_serving.model_config.triton_config import TritonModelConfig
22
- from clarifai.models.model_serving pb_model_repository import TritonModelRepository
21
+ from clarifai.models.model_serving.model_config import get_model_config, ModelTypes, TritonModelConfig
22
+ from clarifai.models.model_serving.pb_model_repository import TritonModelRepository
23
23
 
24
-
25
- model_config = TritonModelConfig(
24
+ model_type = ModelTypes.visual_classifier
25
+ model_config: TritonModelConfig = get_model_config(model_type).make_triton_model_config(
26
26
  model_name="<model_name>",
27
27
  model_version="1",
28
- model_type="<model_type>",
29
28
  image_shape=<[H,W]>, # 0 < [H,W] <= 1024
30
29
  )
31
30