clarifai 10.10.1__py3-none-any.whl → 10.11.1__py3-none-any.whl

This diff covers the content of two publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
clarifai/__init__.py CHANGED
@@ -1 +1 @@
- __version__ = "10.10.1"
+ __version__ = "10.11.1"
clarifai/cli/model.py CHANGED
@@ -1,10 +1,11 @@
  import click
+
  from clarifai.cli.base import cli
 
 
  @cli.group(['model'])
  def model():
- """Manage models: upload, test locally"""
+ """Manage models: upload, test locally, run_locally, predict"""
  pass
 
 
@@ -39,12 +40,43 @@ def upload(model_path, download_checkpoints, skip_dockerfile):
  type=click.Path(exists=True),
  required=True,
  help='Path to the model directory.')
- def test_locally(model_path):
+ @click.option(
+ '--mode',
+ type=click.Choice(['env', 'container'], case_sensitive=False),
+ default='env',
+ show_default=True,
+ help=
+ 'Specify how to test the model locally: "env" for virtual environment or "container" for Docker container. Defaults to "env".'
+ )
+ @click.option(
+ '--keep_env',
+ is_flag=True,
+ help=
+ 'Keep the virtual environment after testing the model locally (applicable for virtualenv mode). Defaults to False.'
+ )
+ @click.option(
+ '--keep_image',
+ is_flag=True,
+ help=
+ 'Keep the Docker image after testing the model locally (applicable for container mode). Defaults to False.'
+ )
+ def test_locally(model_path, keep_env=False, keep_image=False, mode='env'):
  """Test model locally."""
  try:
  from clarifai.runners.models import model_run_locally
- model_run_locally.main(model_path)
- click.echo(f"Model tested locally from {model_path}.")
+ if mode == 'env' and keep_image:
+ raise ValueError("'keep_image' is applicable only for 'container' mode")
+ if mode == 'container' and keep_env:
+ raise ValueError("'keep_env' is applicable only for 'env' mode")
+
+ if mode == "env":
+ click.echo("Testing model locally in a virtual environment...")
+ model_run_locally.main(model_path, run_model_server=False, keep_env=keep_env)
+ elif mode == "container":
+ click.echo("Testing model locally inside a container...")
+ model_run_locally.main(
+ model_path, inside_container=True, run_model_server=False, keep_image=keep_image)
+ click.echo("Model tested successfully.")
  except Exception as e:
  click.echo(f"Failed to test model locally: {e}", err=True)
 
@@ -55,11 +87,180 @@ def test_locally(model_path):
  type=click.Path(exists=True),
  required=True,
  help='Path to the model directory.')
- def run_locally(model_path):
- """Run model locally and starts a GRPC server to serve the model."""
+ @click.option(
+ '--port',
+ '-p',
+ type=int,
+ default=8000,
+ show_default=True,
+ help="The port to host the gRPC server for running the model locally. Defaults to 8000.")
+ @click.option(
+ '--mode',
+ type=click.Choice(['env', 'container'], case_sensitive=False),
+ default='env',
+ show_default=True,
+ help=
+ 'Specifies how to run the model: "env" for virtual environment or "container" for Docker container. Defaults to "env".'
+ )
+ @click.option(
+ '--keep_env',
+ is_flag=True,
+ help=
+ 'Keep the virtual environment after testing the model locally (applicable for virtualenv mode). Defaults to False.'
+ )
+ @click.option(
+ '--keep_image',
+ is_flag=True,
+ help=
+ 'Keep the Docker image after testing the model locally (applicable for container mode). Defaults to False.'
+ )
+ def run_locally(model_path, port, mode, keep_env, keep_image):
+ """Run the model locally and start a gRPC server to serve the model."""
  try:
  from clarifai.runners.models import model_run_locally
- model_run_locally.main(model_path, run_model_server=True)
- click.echo(f"Model server started locally from {model_path}.")
+ if mode == 'env' and keep_image:
+ raise ValueError("'keep_image' is applicable only for 'container' mode")
+ if mode == 'container' and keep_env:
+ raise ValueError("'keep_env' is applicable only for 'env' mode")
+
+ if mode == "env":
+ click.echo("Running model locally in a virtual environment...")
+ model_run_locally.main(model_path, run_model_server=True, keep_env=keep_env, port=port)
+ elif mode == "container":
+ click.echo("Running model locally inside a container...")
+ model_run_locally.main(
+ model_path,
+ inside_container=True,
+ run_model_server=True,
+ port=port,
+ keep_image=keep_image)
+ click.echo(f"Model server started locally from {model_path} in {mode} mode.")
  except Exception as e:
  click.echo(f"Failed to starts model server locally: {e}", err=True)
+
+
+ @model.command()
+ @click.option(
+ '--config',
+ type=click.Path(exists=True),
+ required=False,
+ help='Path to the model predict config file.')
+ @click.option('--model_id', required=False, help='Model ID of the model used to predict.')
+ @click.option('--user_id', required=False, help='User ID of the model used to predict.')
+ @click.option('--app_id', required=False, help='App ID of the model used to predict.')
+ @click.option('--model_url', required=False, help='Model URL of the model used to predict.')
+ @click.option('--file_path', required=False, help='File path of file for the model to predict')
+ @click.option('--url', required=False, help='URL to the file for the model to predict')
+ @click.option('--bytes', required=False, help='Bytes to the file for the model to predict')
+ @click.option(
+ '--input_id', required=False, help='Existing input id in the app for the model to predict')
+ @click.option('--input_type', required=False, help='Type of input')
+ @click.option(
+ '-cc_id',
+ '--compute_cluster_id',
+ required=False,
+ help='Compute Cluster ID to use for the model')
+ @click.option('-np_id', '--nodepool_id', required=False, help='Nodepool ID to use for the model')
+ @click.option(
+ '-dpl_id', '--deployment_id', required=False, help='Deployment ID to use for the model')
+ @click.option(
+ '--inference_params', required=False, default='{}', help='Inference parameters to override')
+ @click.option('--output_config', required=False, default='{}', help='Output config to override')
+ @click.pass_context
+ def predict(ctx, config, model_id, user_id, app_id, model_url, file_path, url, bytes, input_id,
+ input_type, compute_cluster_id, nodepool_id, deployment_id, inference_params,
+ output_config):
+ """Predict using the given model"""
+ import json
+
+ from clarifai.client.deployment import Deployment
+ from clarifai.client.input import Input
+ from clarifai.client.model import Model
+ from clarifai.client.nodepool import Nodepool
+ from clarifai.utils.cli import from_yaml
+ if config:
+ config = from_yaml(config)
+ model_id, user_id, app_id, model_url, file_path, url, bytes, input_id, input_type, compute_cluster_id, nodepool_id, deployment_id, inference_params, output_config = (
+ config.get(k, v)
+ for k, v in [('model_id', model_id), ('user_id', user_id), ('app_id', app_id), (
+ 'model_url', model_url), ('file_path', file_path), ('url', url), ('bytes', bytes), (
+ 'input_id',
+ input_id), ('input_type',
+ input_type), ('compute_cluster_id',
+ compute_cluster_id), ('nodepool_id', nodepool_id), (
+ 'deployment_id',
+ deployment_id), ('inference_params',
+ inference_params), ('output_config',
+ output_config)])
+ if sum([opt[1] for opt in [(model_id, 1), (user_id, 1), (app_id, 1), (model_url, 3)]
+ if opt[0]]) != 3:
+ raise ValueError("Either --model_id & --user_id & --app_id or --model_url must be provided.")
+ if sum([1 for opt in [file_path, url, bytes, input_id] if opt]) != 1:
+ raise ValueError("Exactly one of --file_path, --url, --bytes or --input_id must be provided.")
+ if compute_cluster_id or nodepool_id or deployment_id:
+ if sum([
+ opt[1] for opt in [(compute_cluster_id, 0.5), (nodepool_id, 0.5), (deployment_id, 1)]
+ if opt[0]
+ ]) != 1:
+ raise ValueError(
+ "Either --compute_cluster_id & --nodepool_id or --deployment_id must be provided.")
+ if model_url:
+ model = Model(url=model_url, pat=ctx.obj['pat'], base_url=ctx.obj['base_url'])
+ else:
+ model = Model(
+ model_id=model_id,
+ user_id=user_id,
+ app_id=app_id,
+ pat=ctx.obj['pat'],
+ base_url=ctx.obj['base_url'])
+
+ if inference_params:
+ inference_params = json.loads(inference_params)
+ if output_config:
+ output_config = json.loads(output_config)
+
+ if file_path:
+ model_prediction = model.predict_by_filepath(
+ filepath=file_path,
+ input_type=input_type,
+ compute_cluster_id=compute_cluster_id,
+ nodepool_id=nodepool_id,
+ deployment_id=deployment_id,
+ inference_params=inference_params,
+ output_config=output_config)
+ elif url:
+ model_prediction = model.predict_by_url(
+ url=url,
+ input_type=input_type,
+ compute_cluster_id=compute_cluster_id,
+ nodepool_id=nodepool_id,
+ deployment_id=deployment_id,
+ inference_params=inference_params,
+ output_config=output_config)
+ elif bytes:
+ bytes = str.encode(bytes)
+ model_prediction = model.predict_by_bytes(
+ input_bytes=bytes,
+ input_type=input_type,
+ compute_cluster_id=compute_cluster_id,
+ nodepool_id=nodepool_id,
+ deployment_id=deployment_id,
+ inference_params=inference_params,
+ output_config=output_config)
+ elif input_id:
+ inputs = [Input.get_input(input_id)]
+ runner_selector = None
+ if deployment_id:
+ runner_selector = Deployment.get_runner_selector(
+ user_id=ctx.obj['user_id'], deployment_id=deployment_id)
+ elif compute_cluster_id and nodepool_id:
+ runner_selector = Nodepool.get_runner_selector(
+ user_id=ctx.obj['user_id'],
+ compute_cluster_id=compute_cluster_id,
+ nodepool_id=nodepool_id)
+ model_prediction = model.predict(
+ inputs=inputs,
+ runner_selector=runner_selector,
+ inference_params=inference_params,
+ output_config=output_config)
+ click.echo(model_prediction)
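
The new --mode, --keep_env, --keep_image and --port options are thin wrappers around keyword arguments of model_run_locally.main, and the new predict command delegates to the existing Model client helpers. A minimal sketch of the equivalent direct Python calls, assuming a hypothetical local model directory and placeholder model URL, PAT and input URL (illustrative values only, not part of the package):

from clarifai.client.model import Model
from clarifai.runners.models import model_run_locally

MODEL_PATH = "./my_model"  # hypothetical model directory

# test_locally with --mode env (the default): test in a virtualenv, no gRPC server.
model_run_locally.main(MODEL_PATH, run_model_server=False, keep_env=False)

# test_locally with --mode container --keep_image: test inside Docker, keep the built image.
model_run_locally.main(
    MODEL_PATH, inside_container=True, run_model_server=False, keep_image=True)

# run_locally with --mode env --port 8001: serve the model over gRPC locally.
model_run_locally.main(MODEL_PATH, run_model_server=True, keep_env=False, port=8001)

# predict with --model_url ... --url ... boils down to:
model = Model(url="https://clarifai.com/<user_id>/<app_id>/models/<model_id>", pat="<PAT>")
prediction = model.predict_by_url(
    url="https://example.com/image.jpg",  # placeholder input URL
    input_type="image",
    inference_params={},
    output_config={})
print(prediction)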
clarifai/client/model.py CHANGED
@@ -743,7 +743,7 @@ class Model(Lister, BaseClient):
  file_bytes = f.read()
 
  return self.generate_by_bytes(
- filepath=file_bytes,
+ input_bytes=file_bytes,
  input_type=input_type,
  compute_cluster_id=compute_cluster_id,
  nodepool_id=nodepool_id,
@@ -165,6 +165,14 @@ class InputAnnotationDownloader:
  def _save_annotation_to_archive(self, new_archive: zipfile.ZipFile, annot_data: List[Dict],
  file_name: str) -> None:
  """Gets the annotation response bytestring (from requests) and append to zip file."""
+ # Fill zero values for missing bounding box keys
+ for annot in annot_data:
+ if annot.get('regionInfo') and annot['regionInfo'].get('boundingBox'):
+ bbox = annot['regionInfo']['boundingBox']
+ bbox.setdefault('topRow', 0)
+ bbox.setdefault('leftCol', 0)
+ bbox.setdefault('bottomRow', 0)
+ bbox.setdefault('rightCol', 0)
  # Serialize the dictionary to a JSON string
  json_str = json.dumps(annot_data)
  # Convert the JSON string to bytes
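
The export path now back-fills missing bounding-box coordinates with zeros before the annotations are serialized into the archive, presumably so every exported box carries all four keys. A toy illustration of the effect (sample dict only, not a real API response):

annot = {"regionInfo": {"boundingBox": {"topRow": 0.1, "leftCol": 0.2}}}

bbox = annot["regionInfo"]["boundingBox"]
for key in ("topRow", "leftCol", "bottomRow", "rightCol"):
    bbox.setdefault(key, 0)  # only missing keys are filled; existing values stay untouched

print(bbox)  # {'topRow': 0.1, 'leftCol': 0.2, 'bottomRow': 0, 'rightCol': 0}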
clarifai/rag/rag.py CHANGED
@@ -45,10 +45,14 @@ class RAG:
  self.logger.info("workflow_url:%s", workflow_url)
  w = Workflow(workflow_url, base_url=base_url, pat=pat)
  self._prompt_workflow = w
- self._app = App(app_id=w.app_id, base_url=w.base, pat=w.pat)
+ self._app = App(app_id=w.app_id, user_id=w.user_id, base_url=w.base, pat=w.pat)
  elif workflow_url is None and workflow is not None:
  self._prompt_workflow = workflow
- self._app = App(app_id=workflow.app_id, base_url=workflow.base, pat=workflow.pat)
+ self._app = App(
+ app_id=workflow.app_id,
+ user_id=workflow.user_id,
+ base_url=workflow.base,
+ pat=workflow.pat)
 
  @classmethod
  def setup(cls,
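
The RAG helper now forwards the workflow owner's user_id when constructing the App client, presumably so the app lookup resolves against the correct user. A minimal sketch of the fixed construction, using the same Workflow attributes the diff reads (URL and PAT are placeholders):

from clarifai.client.app import App
from clarifai.client.workflow import Workflow

w = Workflow("https://clarifai.com/<user_id>/<app_id>/workflows/<workflow_id>", pat="<PAT>")
# user_id is now passed through alongside app_id, base_url and pat.
app = App(app_id=w.app_id, user_id=w.user_id, base_url=w.base, pat=w.pat)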
@@ -22,6 +22,8 @@ ENV CLARIFAI_API_BASE=${CLARIFAI_API_BASE}
 
  # Set the NUMBA cache dir to /tmp
  ENV NUMBA_CACHE_DIR=/tmp/numba_cache
+ # Set the TORCHINDUCTOR cache dir to /tmp
+ ENV TORCHINDUCTOR_CACHE_DIR=/tmp/torchinductor_cache
  ENV HOME=/tmp
 
  # Set the working directory to /app
@@ -24,7 +24,8 @@ class AnyAnyModel(ModelRunner):
  list_input_dict = [
  InputDataHandler.from_proto(input).to_python() for input in input_request.inputs
  ]
- inference_params = json_format.MessageToDict(input_request.model.output_info.params)
+ inference_params = json_format.MessageToDict(
+ input_request.model.model_version.output_info.params)
 
  return list_input_dict, inference_params
 
@@ -141,7 +142,8 @@ class VisualInputModel(AnyAnyModel):
  list_input_dict = [
  InputDataHandler.from_proto(input).image(format="np") for input in input_request.inputs
  ]
- inference_params = json_format.MessageToDict(input_request.model.output_info.params)
+ inference_params = json_format.MessageToDict(
+ input_request.model.model_version.output_info.params)
 
  return list_input_dict, inference_params
 
@@ -181,7 +183,8 @@ class TextInputModel(AnyAnyModel):
  def parse_input_request(
  self, input_request: service_pb2.PostModelOutputsRequest) -> Tuple[List[Dict], Dict]:
  list_input_text = [InputDataHandler.from_proto(input).text for input in input_request.inputs]
- inference_params = json_format.MessageToDict(input_request.model.output_info.params)
+ inference_params = json_format.MessageToDict(
+ input_request.model.model_version.output_info.params)
 
  return list_input_text, inference_params
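
All three runner base classes now read inference parameters from the request's model.model_version.output_info.params field rather than model.output_info.params. A hedged sketch of where callers would now place the params, assuming the clarifai_grpc proto layout referenced in the hunks above:

from clarifai_grpc.grpc.api import service_pb2
from google.protobuf import json_format

request = service_pb2.PostModelOutputsRequest()
# Params are set on model.model_version.output_info.params ...
request.model.model_version.output_info.params.update({"temperature": 0.7})

# ... which is exactly the field the runners now parse.
inference_params = json_format.MessageToDict(
    request.model.model_version.output_info.params)
print(inference_params)  # {'temperature': 0.7}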