clarifai 10.9.1__py3-none-any.whl → 10.9.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -5,10 +5,12 @@ from typing import Dict, Generator, List
  from clarifai_grpc.grpc.api import resources_pb2, service_pb2
  from clarifai_grpc.grpc.api.resources_pb2 import Input
  from clarifai_grpc.grpc.api.status import status_code_pb2
+ from google.protobuf.json_format import MessageToDict

  from clarifai.client.base import BaseClient
  from clarifai.client.input import Inputs
  from clarifai.client.lister import Lister
+ from clarifai.client.model import Model
  from clarifai.constants.workflow import MAX_WORKFLOW_PREDICT_INPUTS
  from clarifai.errors import UserError
  from clarifai.urls.helper import ClarifaiUrlHelper
@@ -60,6 +62,7 @@ class Workflow(Lister, BaseClient):
      self.output_config = output_config
      self.workflow_info = resources_pb2.Workflow(**self.kwargs)
      self.logger = logger
+     self.input_types = None
      BaseClient.__init__(
          self,
          user_id=self.user_id,
@@ -109,20 +112,26 @@ class Workflow(Lister, BaseClient):

      return response

-  def predict_by_filepath(self, filepath: str, input_type: str):
+  def predict_by_filepath(self, filepath: str, input_type: str = None):
    """Predicts the workflow based on the given filepath.

    Args:
        filepath (str): The filepath to predict.
-       input_type (str): The type of input. Can be 'image', 'text', 'video' or 'audio'.
+       input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio'.

    Example:
        >>> from clarifai.client.workflow import Workflow
        >>> workflow = Workflow("url") # Example: https://clarifai.com/clarifai/main/workflows/Face-Sentiment
        or
        >>> workflow = Workflow(user_id='user_id', app_id='app_id', workflow_id='workflow_id')
-       >>> workflow_prediction = workflow.predict_by_filepath('filepath', 'image')
+       >>> workflow_prediction = workflow.predict_by_filepath('filepath')
    """
+   if not input_type:
+     self.load_info()
+     if len(self.input_types) > 1:
+       raise UserError("Workflow has multiple input types. Please use workflow.predict().")
+     input_type = self.input_types[0]
+
    if input_type not in {'image', 'text', 'video', 'audio'}:
      raise UserError('Invalid input type it should be image, text, video or audio.')
    if not os.path.isfile(filepath):
@@ -133,13 +142,19 @@ class Workflow(Lister, BaseClient):

      return self.predict_by_bytes(file_bytes, input_type)

-  def predict_by_bytes(self, input_bytes: bytes, input_type: str):
+  def predict_by_bytes(self, input_bytes: bytes, input_type: str = None):
    """Predicts the workflow based on the given bytes.

    Args:
        input_bytes (bytes): Bytes to predict on.
-       input_type (str): The type of input. Can be 'image', 'text', 'video' or 'audio'.
+       input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio'.
    """
+   if not input_type:
+     self.load_info()
+     if len(self.input_types) > 1:
+       raise UserError("Workflow has multiple input types. Please use workflow.predict().")
+     input_type = self.input_types[0]
+
    if input_type not in {'image', 'text', 'video', 'audio'}:
      raise UserError('Invalid input type it should be image, text, video or audio.')
    if not isinstance(input_bytes, bytes):
@@ -156,20 +171,26 @@ class Workflow(Lister, BaseClient):

      return self.predict(inputs=[input_proto])

-  def predict_by_url(self, url: str, input_type: str):
+  def predict_by_url(self, url: str, input_type: str = None):
    """Predicts the workflow based on the given URL.

    Args:
        url (str): The URL to predict.
-       input_type (str): The type of input. Can be 'image', 'text', 'video' or 'audio'.
+       input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio'.

    Example:
        >>> from clarifai.client.workflow import Workflow
        >>> workflow = Workflow("url") # Example: https://clarifai.com/clarifai/main/workflows/Face-Sentiment
        or
        >>> workflow = Workflow(user_id='user_id', app_id='app_id', workflow_id='workflow_id')
-       >>> workflow_prediction = workflow.predict_by_url('url', 'image')
+       >>> workflow_prediction = workflow.predict_by_url('url')
    """
+   if not input_type:
+     self.load_info()
+     if len(self.input_types) > 1:
+       raise UserError("Workflow has multiple input types. Please use workflow.predict().")
+     input_type = self.input_types[0]
+
    if input_type not in {'image', 'text', 'video', 'audio'}:
      raise UserError('Invalid input type it should be image, text, video or audio.')

@@ -245,6 +266,25 @@ class Workflow(Lister, BaseClient):

      self.logger.info(f"Exported workflow to {out_path}")

+  def load_info(self) -> None:
+    """Loads the workflow info."""
+    if not self.input_types:
+      request = service_pb2.GetWorkflowRequest(user_app_id=self.user_app_id, workflow_id=self.id)
+      response = self._grpc_request(self.STUB.GetWorkflow, request)
+      if response.status.code != status_code_pb2.SUCCESS:
+        raise Exception(f"Workflow Get failed with response {response.status!r}")
+
+      dict_response = MessageToDict(response, preserving_proto_field_name=True)
+      self.kwargs = self.process_response_keys(dict_response['workflow'])
+      self.workflow_info = resources_pb2.Workflow(**self.kwargs)
+
+      model = Model(
+          model_id=self.kwargs['nodes'][0]['model']['id'],
+          **self.kwargs['nodes'][0]['model'],
+          pat=self.pat)
+      model.load_input_types()
+      self.input_types = model.input_types
+
  def __getattr__(self, name):
    return getattr(self.workflow_info, name)

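With `load_info()` in place, the `input_type` argument on the `predict_by_*` helpers becomes optional: when omitted, the workflow fetches its own definition and infers the type from its first node's model. A minimal sketch of the new call pattern, assuming a single-input-type workflow (the URL is the docstring's example and the filepath is a placeholder):

```python
from clarifai.client.workflow import Workflow

workflow = Workflow("https://clarifai.com/clarifai/main/workflows/Face-Sentiment")

# input_type is now optional; load_info() infers it from the first node's model.
prediction = workflow.predict_by_filepath("path/to/photo.jpg")

# Workflows whose models accept multiple input types still raise UserError here
# and must go through workflow.predict() with explicit input protos.
```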
@@ -192,9 +192,11 @@ class InputAnnotationDownloader:
        self._save_video_to_archive(new_archive, hosted_url, file_name)
        self.num_inputs += 1

-     if data_dict.get("concepts") or data_dict.get("regions"):
+     if data_dict.get("metadata") or data_dict.get("concepts") or data_dict.get("regions"):
        file_name = os.path.join(split, "annotations", input_.id + ".json")
-       annot_data = data_dict.get("regions", []) + data_dict.get("concepts", [])
+       annot_data = [{
+           "metadata": data_dict.get("metadata", {})
+       }] + data_dict.get("regions", []) + data_dict.get("concepts", [])

        self._save_annotation_to_archive(new_archive, annot_data, file_name)
        self.num_annotations += 1
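Exported annotation files now lead with the input's metadata. A sketch of the resulting list shape in the archived `<input_id>.json`, with invented field values:

```python
# Rough shape of annot_data after this change; the metadata and
# concept values here are illustrative only.
annot_data = [
    {"metadata": {"source": "batch-7"}},       # new: always first, {} if unset
    {"concept": {"id": "dog", "value": 1.0}},  # existing: concepts/regions follow
]
```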
@@ -143,7 +143,7 @@ class DisplayUploadStatus:

      for data in dict_response["datasetVersionMetricsGroups"]:
        if isinstance(data["value"], str):
-         if data["value"].startswith("id-"):
+         if ("type" in data) and (data["type"] == "CONCEPT_ID"):
            data["metrics"].update({"Concept": data["value"]})
            data["metrics"].pop("regionLocationMatrix", None)
          dataset_statistics.append(data["metrics"])
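Concept rows in the dataset-version metrics are now recognized by an explicit `type` field instead of sniffing an `id-` prefix on the value. A sketch against a hypothetical response entry (field names come from the diff; values are invented):

```python
data = {
    "type": "CONCEPT_ID",  # the new discriminator
    "value": "dog",        # no longer needs to start with "id-"
    "metrics": {"inputsCount": 120, "regionLocationMatrix": {}},
}
if ("type" in data) and (data["type"] == "CONCEPT_ID"):
    data["metrics"].update({"Concept": data["value"]})
    data["metrics"].pop("regionLocationMatrix", None)
```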
@@ -64,6 +64,10 @@ ENV CLARIFAI_NODEPOOL_ID=${CLARIFAI_NODEPOOL_ID}
  ENV CLARIFAI_COMPUTE_CLUSTER_ID=${CLARIFAI_COMPUTE_CLUSTER_ID}
  ENV CLARIFAI_API_BASE=${CLARIFAI_API_BASE}

+ # Set the NUMBA cache dir to /tmp
+ ENV NUMBA_CACHE_DIR=/tmp/numba_cache
+ ENV HOME=/tmp
+
  # Set the working directory to /app
  WORKDIR /app

@@ -1,4 +1,3 @@
- import argparse
  import importlib.util
  import inspect
  import os
@@ -22,8 +21,6 @@ class ModelRunLocally:
  def __init__(self, model_path):
    self.model_path = model_path
    self.requirements_file = os.path.join(self.model_path, "requirements.txt")
-   self.venv_dir, self.temp_dir = self.create_temp_venv()
-   self.python_executable = os.path.join(self.venv_dir, "bin", "python")

  def create_temp_venv(self):
    """Create a temporary virtual environment."""
@@ -32,6 +29,10 @@ class ModelRunLocally:
    venv_dir = os.path.join(temp_dir, "venv")
    venv.create(venv_dir, with_pip=True)

+   self.venv_dir = venv_dir
+   self.temp_dir = temp_dir
+   self.python_executable = os.path.join(venv_dir, "bin", "python")
+
    logger.info(f"Created temporary virtual environment at {venv_dir}")
    return venv_dir, temp_dir

@@ -124,8 +125,8 @@ class ModelRunLocally:
        runner_id="n/a",
        nodepool_id="n/a",
        compute_cluster_id="n/a",
+       user_id="n/a",
    )
-   runner.load_model()

    # send an inference.
    response = self._run_model_inference(runner)
@@ -174,21 +175,16 @@ class ModelRunLocally:
      shutil.rmtree(self.temp_dir)


- def main():
-   parser = argparse.ArgumentParser()
-   parser.add_argument(
-       '--model_path', type=str, required=True, help='Path of the model folder to upload')
-   args = parser.parse_args()
+ def main(model_path, run_model_server=False):

-   model_path = args.model_path
    manager = ModelRunLocally(model_path)
+   manager.create_temp_venv()

    try:
      manager.install_requirements()
-     manager.test_model()
+     if run_model_server:
+       manager.run_model_server()
+     else:
+       manager.test_model()
    finally:
      manager.clean_up()
-
-
- if __name__ == "__main__":
-   main()
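The local-runner entrypoint is now an importable function rather than an argparse script, so it can be driven programmatically (for example, from the new click CLI). A sketch of calling it directly; the module path and model folder are assumptions:

```python
# Module path assumed from context; the model folder is a placeholder.
from clarifai.runners.models.model_run_locally import main

main("path/to/my_model")                         # venv + requirements + one test inference
main("path/to/my_model", run_model_server=True)  # serve the model locally instead
```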
@@ -28,7 +28,7 @@ class ModelServicer(service_pb2_grpc.V2Servicer):
    """

    # Download any urls that are not already bytes.
-   ensure_urls_downloaded(self.url_fetcher, request)
+   ensure_urls_downloaded(request)

    try:
      return self.model_class.predict(request)
@@ -47,7 +47,7 @@ class ModelServicer(service_pb2_grpc.V2Servicer):
    returns an output.
    """
    # Download any urls that are not already bytes.
-   ensure_urls_downloaded(self.url_fetcher, request)
+   ensure_urls_downloaded(request)

    try:
      return self.model_class.generate(request)
@@ -71,7 +71,7 @@ class ModelServicer(service_pb2_grpc.V2Servicer):

    # Download any urls that are not already bytes.
    for req in request:
-     ensure_urls_downloaded(self.url_fetcher, req)
+     ensure_urls_downloaded(req)

    try:
      return self.model_class.stream(request_copy)
@@ -1,4 +1,3 @@
- import argparse
  import os
  import time
  from string import Template
@@ -60,9 +59,6 @@ class ModelUploader:
      return config

  def _validate_config_checkpoints(self):
-   if not self.config.get("checkpoints"):
-     logger.info("No checkpoints specified in the config file")
-     return None

    assert "type" in self.config.get("checkpoints"), "No loader type specified in the config file"
    loader_type = self.config.get("checkpoints").get("type")
@@ -201,15 +197,20 @@ class ModelUploader:
      return f"{self.folder}.tar.gz"

  def download_checkpoints(self):
+   if not self.config.get("checkpoints"):
+     logger.info("No checkpoints specified in the config file")
+     return True
+
    repo_id, hf_token = self._validate_config_checkpoints()
-   if repo_id and hf_token:
-     loader = HuggingFaceLoader(repo_id=repo_id, token=hf_token)
-     success = loader.download_checkpoints(self.checkpoint_path)

-     if not success:
-       logger.error(f"Failed to download checkpoints for model {repo_id}")
-       return
+   loader = HuggingFaceLoader(repo_id=repo_id, token=hf_token)
+   success = loader.download_checkpoints(self.checkpoint_path)
+
+   if not success:
+     logger.error(f"Failed to download checkpoints for model {repo_id}")
+   else:
      logger.info(f"Downloaded checkpoints for model {repo_id}")
+   return success

  def _concepts_protos_from_concepts(self, concepts):
    concept_protos = []
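`download_checkpoints` now returns a boolean and treats a missing `checkpoints` section as a successful no-op, so callers can branch on the result. A sketch of the new contract, with a placeholder folder:

```python
uploader = ModelUploader("path/to/model_folder")  # placeholder folder

# True immediately if config.yaml has no 'checkpoints' section;
# otherwise True/False depending on the Hugging Face download.
if not uploader.download_checkpoints():
    raise SystemExit("checkpoint download failed; aborting upload")
```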
@@ -245,15 +246,23 @@ class ModelUploader:
    model_type_id = self.config.get('model').get('model_type_id')
    if model_type_id in self.CONCEPTS_REQUIRED_MODEL_TYPE:

-     labels = HuggingFaceLoader.fetch_labels(self.checkpoint_path)
-     # sort the concepts by id and then update the config file
-     labels = sorted(labels.items(), key=lambda x: int(x[0]))
+     if 'concepts' in self.config:
+       labels = self.config.get('concepts')
+       logger.info(f"Found {len(labels)} concepts in the config file.")
+       for concept in labels:
+         concept_proto = json_format.ParseDict(concept, resources_pb2.Concept())
+         model_version_proto.output_info.data.concepts.append(concept_proto)
+     else:
+       labels = HuggingFaceLoader.fetch_labels(self.checkpoint_path)
+       logger.info(f"Found {len(labels)} concepts from the model checkpoints.")
+       # sort the concepts by id and then update the config file
+       labels = sorted(labels.items(), key=lambda x: int(x[0]))

-     config_file = os.path.join(self.folder, 'config.yaml')
-     self.hf_labels_to_config(labels, config_file)
+       config_file = os.path.join(self.folder, 'config.yaml')
+       self.hf_labels_to_config(labels, config_file)

-     model_version_proto.output_info.data.concepts.extend(
-         self._concepts_protos_from_concepts(labels))
+       model_version_proto.output_info.data.concepts.extend(
+           self._concepts_protos_from_concepts(labels))
    return model_version_proto

  def upload_model_version(self, download_checkpoints):
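Concepts can now be pinned directly in config.yaml and are converted one-by-one with `json_format.ParseDict`, bypassing checkpoint downloads entirely. A self-contained sketch of that path; the concept ids and names are invented:

```python
import yaml
from google.protobuf import json_format
from clarifai_grpc.grpc.api import resources_pb2

# Illustrative 'concepts' section of a model's config.yaml.
config = yaml.safe_load("""
concepts:
  - id: "0"
    name: cat
  - id: "1"
    name: dog
""")

for concept in config["concepts"]:
    proto = json_format.ParseDict(concept, resources_pb2.Concept())
    print(proto.id, proto.name)
```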
@@ -261,17 +270,31 @@ class ModelUploader:
    logger.info(f"Will tar it into file: {file_path}")

    model_type_id = self.config.get('model').get('model_type_id')
-   repo_id, hf_token = self._validate_config_checkpoints()

-   loader = HuggingFaceLoader(repo_id=repo_id, token=hf_token)
-
-   if not download_checkpoints and not loader.validate_download(self.checkpoint_path) and (
-       model_type_id in self.CONCEPTS_REQUIRED_MODEL_TYPE) and 'concepts' not in self.config:
-     logger.error(
-         f"Model type {model_type_id} requires concepts to be specified in the config file or download the model checkpoints to infer the concepts."
+   if (model_type_id in self.CONCEPTS_REQUIRED_MODEL_TYPE) and 'concepts' not in self.config:
+     logger.info(
+         f"Model type {model_type_id} requires concepts to be specified in the config.yaml file.."
      )
-     input("Press Enter to download the checkpoints to infer the concepts and continue...")
-     self.download_checkpoints()
+     if self.config.get("checkpoints"):
+       logger.info(
+           "Checkpoints specified in the config.yaml file, will download the HF model's config.json file to infer the concepts."
+       )
+
+       if not download_checkpoints and not HuggingFaceLoader.validate_config(
+           self.checkpoint_path):
+
+         input(
+             "Press Enter to download the HuggingFace model's config.json file to infer the concepts and continue..."
+         )
+         repo_id, hf_token = self._validate_config_checkpoints()
+         loader = HuggingFaceLoader(repo_id=repo_id, token=hf_token)
+         loader.download_config(self.checkpoint_path)
+
+     else:
+       logger.error(
+           "No checkpoints specified in the config.yaml file to infer the concepts. Please either specify the concepts directly in the config.yaml file or include a checkpoints section to download the HF model's config.json file to infer the concepts."
+       )
+       return

    model_version_proto = self.get_model_version_proto()

@@ -385,11 +408,11 @@ class ModelUploader:
    return False


- def main(folder, download_checkpoints):
+ def main(folder, download_checkpoints, skip_dockerfile):
    uploader = ModelUploader(folder)
    if download_checkpoints:
      uploader.download_checkpoints()
-   if not args.skip_dockerfile:
+   if not skip_dockerfile:
      uploader.create_dockerfile()
    exists = uploader.check_model_exists()
    if exists:
@@ -401,25 +424,3 @@ def main(folder, download_checkpoints):

    input("Press Enter to continue...")
    uploader.upload_model_version(download_checkpoints)
-
-
- if __name__ == "__main__":
-   parser = argparse.ArgumentParser()
-   parser.add_argument(
-       '--model_path', type=str, help='Path of the model folder to upload', required=True)
-   # flag to default to not download checkpoints
-   parser.add_argument(
-       '--download_checkpoints',
-       action='store_true',
-       help=
-       'Flag to download checkpoints before uploading and including them in the tar file that is uploaded. Defaults to False, which will attempt to download them at docker build time.',
-   )
-   parser.add_argument(
-       '--skip_dockerfile',
-       action='store_true',
-       help=
-       'Flag to skip generating a dockerfile so that you can manually edit an already created dockerfile.',
-   )
-   args = parser.parse_args()
-
-   main(args.model_path, args.download_checkpoints)
@@ -18,9 +18,17 @@ class HuggingFaceLoader:
      if importlib.util.find_spec("huggingface_hub") is None:
        raise ImportError(self.HF_DOWNLOAD_TEXT)
      os.environ['HF_TOKEN'] = token
+     from huggingface_hub import HfApi
+
+     api = HfApi()
+     api.whoami(token=token)
+
      subprocess.run(f'huggingface-cli login --token={os.environ["HF_TOKEN"]}', shell=True)
    except Exception as e:
-     Exception("Error setting up Hugging Face token ", e)
+     logger.error(
+         f"Error setting up Hugging Face token, please make sure you have the correct token: {e}"
+     )
+     logger.info("Continuing without Hugging Face token")

  def download_checkpoints(self, checkpoint_path: str):
    # throw error if huggingface_hub wasn't installed
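Token setup previously built an exception without raising it (`Exception("Error setting up Hugging Face token ", e)` was a no-op); the token is now verified eagerly via `HfApi.whoami`, and failures are logged instead of silently swallowed. The same check in isolation, with a placeholder token:

```python
from huggingface_hub import HfApi

def hf_token_is_valid(token: str) -> bool:
    # whoami() raises (e.g. on HTTP 401) when the token is rejected.
    try:
        HfApi().whoami(token=token)
        return True
    except Exception:
        return False

print(hf_token_is_valid("hf_xxx"))  # placeholder token -> False
```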
@@ -50,6 +58,28 @@ class HuggingFaceLoader:
        return False
      return True

+  def download_config(self, checkpoint_path: str):
+    # throw error if huggingface_hub wasn't installed
+    try:
+      from huggingface_hub import hf_hub_download
+    except ImportError:
+      raise ImportError(self.HF_DOWNLOAD_TEXT)
+    if os.path.exists(checkpoint_path) and os.path.exists(
+        os.path.join(checkpoint_path, 'config.json')):
+      logger.info("HF model's config.json already exists")
+      return True
+    os.makedirs(checkpoint_path, exist_ok=True)
+    try:
+      is_hf_model_exists = self.validate_hf_model()
+      if not is_hf_model_exists:
+        logger.error("Model %s not found on Hugging Face" % (self.repo_id))
+        return False
+      hf_hub_download(repo_id=self.repo_id, filename='config.json', local_dir=checkpoint_path)
+    except Exception as e:
+      logger.error(f"Error downloading model's config.json {e}")
+      return False
+    return True
+
  def validate_hf_model(self,):
    # check if model exists on HF
    try:
@@ -70,6 +100,12 @@ class HuggingFaceLoader:
      return (len(checkpoint_dir_files) >= len(list_repo_files(self.repo_id))) and len(
          list_repo_files(self.repo_id)) > 0

+  @staticmethod
+  def validate_config(checkpoint_path: str):
+    # check if downloaded config.json exists
+    return os.path.exists(checkpoint_path) and os.path.exists(
+        os.path.join(checkpoint_path, 'config.json'))
+
  @staticmethod
  def fetch_labels(checkpoint_path: str):
    # Fetch labels for classification, detection and segmentation models
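Together, `download_config` and `validate_config` let the uploader fetch only the repo's config.json when concepts must be inferred, instead of the full checkpoint set. A usage sketch; the import path, repo id, and paths are assumptions:

```python
from clarifai.runners.utils.loader import HuggingFaceLoader  # import path assumed

loader = HuggingFaceLoader(repo_id="org/some-model", token="hf_xxx")  # placeholders
checkpoint_path = "/tmp/checkpoints"

# Skip the download when config.json is already on disk.
if not HuggingFaceLoader.validate_config(checkpoint_path):
    ok = loader.download_config(checkpoint_path)  # fetches config.json only
    print("config.json downloaded" if ok else "download failed")
```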
clarifai/utils/cli.py ADDED
@@ -0,0 +1,45 @@
+ import importlib
+ import os
+ import pkgutil
+
+ import click
+ import yaml
+
+
+ def from_yaml(filename: str):
+   try:
+     with open(filename, 'r') as f:
+       return yaml.safe_load(f)
+   except yaml.YAMLError as e:
+     click.echo(f"Error reading YAML file: {e}", err=True)
+     return {}
+
+
+ def dump_yaml(data, filename: str):
+   try:
+     with open(filename, 'w') as f:
+       yaml.dump(data, f)
+   except Exception as e:
+     click.echo(f"Error writing YAML file: {e}", err=True)
+
+
+ def set_base_url(env):
+   environments = {
+       'prod': 'https://api.clarifai.com',
+       'staging': 'https://api-staging.clarifai.com',
+       'dev': 'https://api-dev.clarifai.com'
+   }
+
+   if env in environments:
+     return environments[env]
+   else:
+     raise ValueError("Invalid environment. Please choose from 'prod', 'staging', 'dev'.")
+
+
+ # Dynamically find and import all command modules from the cli directory
+ def load_command_modules():
+   package_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'cli')
+
+   for _, module_name, _ in pkgutil.iter_modules([package_dir]):
+     if module_name != 'base':  # Skip the base.py file itself
+       importlib.import_module(f'clarifai.cli.{module_name}')
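These helpers back the new click-based CLI (note the new `click ==8.1.7` dependency below). A short sketch of how they behave; `config.yaml` is a hypothetical local file:

```python
from clarifai.utils.cli import from_yaml, load_command_modules, set_base_url

print(set_base_url('staging'))  # https://api-staging.clarifai.com
# set_base_url raises ValueError for anything but 'prod', 'staging', 'dev'.

config = from_yaml('config.yaml')  # hypothetical file; returns {} on YAML errors

# Import every module under clarifai/cli/ except base.py so their
# click commands can register themselves.
load_command_modules()
```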
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: clarifai
- Version: 10.9.1
+ Version: 10.9.4
  Summary: Clarifai Python SDK
  Home-page: https://github.com/Clarifai/clarifai-python
  Author: Clarifai
@@ -20,7 +20,7 @@ Classifier: Operating System :: OS Independent
  Requires-Python: >=3.8
  Description-Content-Type: text/markdown
  License-File: LICENSE
- Requires-Dist: clarifai-grpc >=10.8.8
+ Requires-Dist: clarifai-grpc >=10.9.11
  Requires-Dist: clarifai-protocol >=0.0.6
  Requires-Dist: numpy >=1.22.0
  Requires-Dist: tqdm >=4.65.0
@@ -33,15 +33,17 @@ Requires-Dist: inquirerpy ==0.3.4
  Requires-Dist: tabulate >=0.9.0
  Requires-Dist: protobuf ==5.27.3
  Requires-Dist: fsspec ==2024.6.1
+ Requires-Dist: click ==8.1.7
  Provides-Extra: all
  Requires-Dist: pycocotools ==2.0.6 ; extra == 'all'

  <h1 align="center">
- <a href="https://www.clarifai.com/"><img alt="Clarifai" title="Clarifai" src="https://upload.wikimedia.org/wikipedia/commons/b/bc/Clarifai_Logo_FC_Web.png"></a>
+ <a href="https://www.clarifai.com/"><img alt="Clarifai" title="Clarifai" src="https://github.com/user-attachments/assets/623b883b-7fe5-4b95-bbfa-8691f5779af4"></a>
  </h1>

+
  <h2 align="center">
- Clarifai Python SDK</a>
+ Clarifai Python SDK
  </h2>

@@ -51,6 +53,8 @@ Clarifai Python SDK</a>
    </a>
    <a href="https://pypi.org/project/clarifai" target="_blank"> <img src="https://img.shields.io/pypi/dm/clarifai" alt="PyPI - Downloads">
    </a>
+   <a href="https://img.shields.io/pypi/pyversions/clarifai" target="_blank"> <img src="https://img.shields.io/pypi/pyversions/clarifai" alt="PyPI - Versions">
+   </a>
  </p>

@@ -261,19 +265,19 @@ print(gpt_4_model)


  # Model Predict
- model_prediction = Model("https://clarifai.com/anthropic/completion/models/claude-v2").predict_by_bytes(b"Write a tweet on future of AI", input_type="text")
+ model_prediction = Model("https://clarifai.com/anthropic/completion/models/claude-v2").predict_by_bytes(b"Write a tweet on future of AI")

  # Customizing Model Inference Output
- model_prediction = gpt_4_model.predict_by_bytes(b"Write a tweet on future of AI", "text", inference_params=dict(temperature=str(0.7), max_tokens=30))
+ model_prediction = gpt_4_model.predict_by_bytes(b"Write a tweet on future of AI", inference_params=dict(temperature=str(0.7), max_tokens=30))
  # Return predictions having prediction confidence > 0.98
- model_prediction = model.predict_by_filepath(filepath="local_filepath", input_type, output_config={"min_value": 0.98}) # Supports image, text, audio, video
+ model_prediction = model.predict_by_filepath(filepath="local_filepath", output_config={"min_value": 0.98}) # Supports image, text, audio, video

  # Supports prediction by url
- model_prediction = model.predict_by_url(url="url", input_type) # Supports image, text, audio, video
+ model_prediction = model.predict_by_url(url="url") # Supports image, text, audio, video

  # Return predictions for specified interval of video
  video_input_proto = [input_obj.get_input_from_url("Input_id", video_url=BEER_VIDEO_URL)]
- model_prediction = model.predict(video_input_proto, input_type="video", output_config={"sample_ms": 2000})
+ model_prediction = model.predict(video_input_proto, output_config={"sample_ms": 2000})
  ```
  #### Model Training
  ```python
@@ -383,12 +387,12 @@ from clarifai.client.workflow import Workflow

  # Workflow Predict
  workflow = Workflow("workflow_url") # Example: https://clarifai.com/clarifai/main/workflows/Face-Sentiment
- workflow_prediction = workflow.predict_by_url(url="url", input_type="image") # Supports image, text, audio, video
+ workflow_prediction = workflow.predict_by_url(url="url") # Supports image, text, audio, video

  # Customizing Workflow Inference Output
  workflow = Workflow(user_id="user_id", app_id="app_id", workflow_id="workflow_id",
                      output_config={"min_value": 0.98}) # Return predictions having prediction confidence > 0.98
- workflow_prediction = workflow.predict_by_filepath(filepath="local_filepath", input_type="text") # Supports image, text, audio, video
+ workflow_prediction = workflow.predict_by_filepath(filepath="local_filepath") # Supports image, text, audio, video
  ```

  #### Workflows Listing