clarifai 10.5.0__py3-none-any.whl → 10.5.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
clarifai/client/app.py CHANGED
@@ -30,6 +30,7 @@ class App(Lister, BaseClient):
   def __init__(self,
                url: str = None,
                app_id: str = None,
+               user_id: str = None,
                base_url: str = "https://api.clarifai.com",
                pat: str = None,
                token: str = None,
@@ -53,6 +54,9 @@ class App(Lister, BaseClient):
     if url:
       user_id, app_id = ClarifaiUrlHelper.split_clarifai_app_url(url)
       kwargs = {'user_id': user_id}
+    if user_id:
+      kwargs = {'user_id': user_id}
+
     self.kwargs = {**kwargs, 'id': app_id}
     self.app_info = resources_pb2.App(**self.kwargs)
     self.logger = get_logger(logger_level="INFO", name=__name__)
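
For illustration, a minimal sketch (not part of the diff) of the new `user_id` parameter; the IDs are placeholders and a PAT is assumed to be available, e.g. via the CLARIFAI_PAT environment variable:

from clarifai.client.app import App

# 10.5.0 required either a full app URL or pre-filled kwargs to carry the user;
# 10.5.1 lets the caller pass user_id directly alongside app_id.
app = App(app_id="my-app", user_id="my-user")  # hypothetical IDs
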
clarifai/client/auth/helper.py CHANGED
@@ -105,9 +105,6 @@ class ClarifaiAuthHelper:
     if self.user_id == "":
       raise Exception(
           "Need 'user_id' to not be empty in the query params or user CLARIFAI_USER_ID env var")
-    if self.app_id == "":
-      raise Exception(
-          "Need 'app_id' to not be empty in the query params or user CLARIFAI_APP_ID env var")
     if self._pat != "" and self._token != "":
       raise Exception(
           "A personal access token OR a session token need to be provided, but you cannot provide both."
clarifai/client/auth/stub.py CHANGED
@@ -1,3 +1,4 @@
+import itertools
 import logging
 import time
 from concurrent.futures import ThreadPoolExecutor
@@ -20,6 +21,34 @@ retry_codes_grpc = {
 _threadpool = ThreadPoolExecutor(100)
 
 
+def validate_response(response, attempt, max_attempts):
+  # Helper function to handle simple response validation
+  def handle_simple_response(response):
+    if hasattr(response, 'status') and hasattr(response.status, 'code'):
+      if (response.status.code in throttle_status_codes) and attempt < max_attempts:
+        logging.debug('Retrying with status %s' % str(response.status))
+        return None  # Indicates a retry is needed
+      else:
+        return response
+
+  # Check if the response is an instance of a gRPC streaming call
+  if isinstance(response, grpc._channel._MultiThreadedRendezvous):
+    try:
+      # Check just the first response in the stream for validation
+      first_res = next(response)
+      validated_response = handle_simple_response(first_res)
+      if validated_response is not None:
+        # Have to return that first response and the rest of the stream.
+        return itertools.chain([validated_response], response)
+      return None  # Indicates a retry is needed
+    except grpc.RpcError as e:
+      logging.error('Error processing streaming response: %s' % str(e))
+      return None  # Indicates an error
+  else:
+    # Handle simple response validation
+    return handle_simple_response(response)
+
+
 def create_stub(auth_helper: ClarifaiAuthHelper = None, max_retry_attempts: int = 10) -> V2Stub:
   """
   Create client stub that handles authorization and basic retries for
@@ -109,10 +138,9 @@ class _RetryRpcCallable(RpcCallable):
       time.sleep(self.backoff_time)  # TODO better backoff between attempts
       try:
         response = self.f(*args, **kwargs)
-        if (response.status.code in throttle_status_codes) and attempt < self.max_attempts:
-          logging.debug('Retrying with status %s' % str(response.status))
-        else:
-          return response
+        v = validate_response(response, attempt, self.max_attempts)
+        if v is not None:
+          return v
       except grpc.RpcError as e:
         if (e.code() in retry_codes_grpc) and attempt < self.max_attempts:
          logging.debug('Retrying with status %s' % e.code())
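
The new validate_response helper peeks at the first message of a gRPC stream and, when it is healthy, re-attaches it with itertools.chain so the caller still receives the complete stream. A self-contained sketch of that peek-and-rechain pattern (plain Python iterators stand in for the gRPC stream; names are illustrative):

import itertools

def peek_and_rechain(stream):
  # Pull the first item so it can be inspected before the stream is handed back.
  first = next(stream)
  if first is None:  # stand-in for a retryable/throttled status
    return None  # the retry loop above would then re-issue the call
  # Re-attach the consumed item so nothing is lost by peeking.
  return itertools.chain([first], stream)

print(list(peek_and_rechain(iter([1, 2, 3]))))  # -> [1, 2, 3]
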
clarifai/client/base.py CHANGED
@@ -8,6 +8,7 @@ from google.protobuf.wrappers_pb2 import BoolValue
 from clarifai.client.auth import create_stub
 from clarifai.client.auth.helper import ClarifaiAuthHelper
 from clarifai.errors import ApiError, UserError
+from clarifai.utils.constants import CLARIFAI_PAT_ENV_VAR, CLARIFAI_SESSION_TOKEN_ENV_VAR
 from clarifai.utils.misc import get_from_dict_or_env
 
 
@@ -36,9 +37,9 @@ class BaseClient:
   def __init__(self, **kwargs):
     token, pat = "", ""
     try:
-      pat = get_from_dict_or_env(key="pat", env_key="CLARIFAI_PAT", **kwargs)
+      pat = get_from_dict_or_env(key="pat", env_key=CLARIFAI_PAT_ENV_VAR, **kwargs)
     except UserError:
-      token = get_from_dict_or_env(key="token", env_key="CLARIFAI_SESSION_TOKEN", **kwargs)
+      token = get_from_dict_or_env(key="token", env_key=CLARIFAI_SESSION_TOKEN_ENV_VAR, **kwargs)
     finally:
       assert token or pat, Exception(
           "Need 'pat' or 'token' in args or use one of the CLARIFAI_PAT or CLARIFAI_SESSION_TOKEN env vars"
@@ -54,6 +55,11 @@ class BaseClient:
     self.base = self.auth_helper.base
     self.root_certificates_path = self.auth_helper._root_certificates_path
 
+  @classmethod
+  def from_env(cls, validate: bool = False):
+    auth = ClarifaiAuthHelper.from_env(validate=validate)
+    return cls.from_auth_helper(auth)
+
   @classmethod
   def from_auth_helper(cls, auth: ClarifaiAuthHelper, **kwargs):
     default_kwargs = {
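
A hedged sketch of the new from_env constructor: it builds a ClarifaiAuthHelper from the environment and passes it to from_auth_helper, so it assumes CLARIFAI_PAT (or CLARIFAI_SESSION_TOKEN) and whatever IDs ClarifaiAuthHelper.from_env expects (e.g. CLARIFAI_USER_ID) are already set:

import os
from clarifai.client.base import BaseClient

os.environ.setdefault("CLARIFAI_PAT", "my-pat")       # placeholder value
os.environ.setdefault("CLARIFAI_USER_ID", "my-user")  # placeholder value

# validate=False is forwarded to ClarifaiAuthHelper.from_env, as in the diff above.
client = BaseClient.from_env(validate=False)
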
clarifai/client/model.py CHANGED
@@ -1,7 +1,7 @@
 import json
 import os
 import time
-from typing import Any, Dict, Generator, List, Tuple, Union
+from typing import Any, Dict, Generator, Iterator, List, Tuple, Union
 
 import numpy as np
 import requests
@@ -553,6 +553,315 @@ class Model(Lister, BaseClient):
     return self.predict(
         inputs=[input_proto], inference_params=inference_params, output_config=output_config)
 
+  def generate(self, inputs: List[Input], inference_params: Dict = {}, output_config: Dict = {}):
+    """Generate the stream output on model based on the given inputs.
+
+    Args:
+        inputs (list[Input]): The inputs to predict, must be less than 128.
+    """
+    if not isinstance(inputs, list):
+      raise UserError('Invalid inputs, inputs must be a list of Input objects.')
+    if len(inputs) > MAX_MODEL_PREDICT_INPUTS:
+      raise UserError(f"Too many inputs. Max is {MAX_MODEL_PREDICT_INPUTS}."
+                     )  # TODO Use Chunker for inputs len > 128
+
+    self._override_model_version(inference_params, output_config)
+    request = service_pb2.PostModelOutputsRequest(
+        user_app_id=self.user_app_id,
+        model_id=self.id,
+        version_id=self.model_version.id,
+        inputs=inputs,
+        model=self.model_info)
+
+    start_time = time.time()
+    backoff_iterator = BackoffIterator(10)
+    generation_started = False
+    while True:
+      if generation_started:
+        break
+      stream_response = self._grpc_request(self.STUB.GenerateModelOutputs, request)
+      for response in stream_response:
+        if response.status.code == status_code_pb2.MODEL_DEPLOYING and \
+                time.time() - start_time < 60 * 10:
+          self.logger.info(f"{self.id} model is still deploying, please wait...")
+          time.sleep(next(backoff_iterator))
+          break
+        if response.status.code != status_code_pb2.SUCCESS:
+          raise Exception(f"Model Predict failed with response {response.status!r}")
+        else:
+          if not generation_started:
+            generation_started = True
+          yield response
+
+  def generate_by_filepath(self,
+                           filepath: str,
+                           input_type: str,
+                           inference_params: Dict = {},
+                           output_config: Dict = {}):
+    """Generate the stream output on model based on the given filepath.
+
+    Args:
+        filepath (str): The filepath to predict.
+        input_type (str): The type of input. Can be 'image', 'text', 'video' or 'audio.
+        inference_params (dict): The inference params to override.
+        output_config (dict): The output config to override.
+          min_value (float): The minimum value of the prediction confidence to filter.
+          max_concepts (int): The maximum number of concepts to return.
+          select_concepts (list[Concept]): The concepts to select.
+
+    Example:
+        >>> from clarifai.client.model import Model
+        >>> model = Model("url") # Example URL: https://clarifai.com/clarifai/main/models/general-image-recognition
+                    or
+        >>> model = Model(model_id='model_id', user_id='user_id', app_id='app_id')
+        >>> stream_response = model.generate_by_filepath('/path/to/image.jpg', 'image')
+        >>> stream_response = model.generate_by_filepath('/path/to/text.txt', 'text')
+        >>> list_stream_response = [response for response in stream_response]
+    """
+    if not os.path.isfile(filepath):
+      raise UserError('Invalid filepath.')
+
+    with open(filepath, "rb") as f:
+      file_bytes = f.read()
+
+    return self.generate_by_bytes(file_bytes, input_type, inference_params, output_config)
+
+  def generate_by_bytes(self,
+                        input_bytes: bytes,
+                        input_type: str,
+                        inference_params: Dict = {},
+                        output_config: Dict = {}):
+    """Generate the stream output on model based on the given bytes.
+
+    Args:
+        input_bytes (bytes): File Bytes to predict on.
+        input_type (str): The type of input. Can be 'image', 'text', 'video' or 'audio.
+        inference_params (dict): The inference params to override.
+        output_config (dict): The output config to override.
+          min_value (float): The minimum value of the prediction confidence to filter.
+          max_concepts (int): The maximum number of concepts to return.
+          select_concepts (list[Concept]): The concepts to select.
+
+    Example:
+        >>> from clarifai.client.model import Model
+        >>> model = Model("https://clarifai.com/openai/chat-completion/models/GPT-4")
+        >>> stream_response = model.generate_by_bytes(b'Write a tweet on future of AI',
+                                                      input_type='text',
+                                                      inference_params=dict(temperature=str(0.7), max_tokens=30)))
+        >>> list_stream_response = [response for response in stream_response]
+    """
+    if input_type not in {'image', 'text', 'video', 'audio'}:
+      raise UserError(
+          f"Got input type {input_type} but expected one of image, text, video, audio.")
+    if not isinstance(input_bytes, bytes):
+      raise UserError('Invalid bytes.')
+
+    if input_type == "image":
+      input_proto = Inputs.get_input_from_bytes("", image_bytes=input_bytes)
+    elif input_type == "text":
+      input_proto = Inputs.get_input_from_bytes("", text_bytes=input_bytes)
+    elif input_type == "video":
+      input_proto = Inputs.get_input_from_bytes("", video_bytes=input_bytes)
+    elif input_type == "audio":
+      input_proto = Inputs.get_input_from_bytes("", audio_bytes=input_bytes)
+
+    return self.generate(
+        inputs=[input_proto], inference_params=inference_params, output_config=output_config)
+
+  def generate_by_url(self,
+                      url: str,
+                      input_type: str,
+                      inference_params: Dict = {},
+                      output_config: Dict = {}):
+    """Generate the stream output on model based on the given URL.
+
+    Args:
+        url (str): The URL to predict.
+        input_type (str): The type of input. Can be 'image', 'text', 'video' or 'audio.
+        inference_params (dict): The inference params to override.
+        output_config (dict): The output config to override.
+          min_value (float): The minimum value of the prediction confidence to filter.
+          max_concepts (int): The maximum number of concepts to return.
+          select_concepts (list[Concept]): The concepts to select.
+
+    Example:
+        >>> from clarifai.client.model import Model
+        >>> model = Model("url") # Example URL: https://clarifai.com/clarifai/main/models/general-image-recognition
+                    or
+        >>> model = Model(model_id='model_id', user_id='user_id', app_id='app_id')
+        >>> stream_response = model.generate_by_url('url', 'image')
+        >>> list_stream_response = [response for response in stream_response]
+    """
+    if input_type not in {'image', 'text', 'video', 'audio'}:
+      raise UserError(
+          f"Got input type {input_type} but expected one of image, text, video, audio.")
+
+    if input_type == "image":
+      input_proto = Inputs.get_input_from_url("", image_url=url)
+    elif input_type == "text":
+      input_proto = Inputs.get_input_from_url("", text_url=url)
+    elif input_type == "video":
+      input_proto = Inputs.get_input_from_url("", video_url=url)
+    elif input_type == "audio":
+      input_proto = Inputs.get_input_from_url("", audio_url=url)
+
+    return self.generate(
+        inputs=[input_proto], inference_params=inference_params, output_config=output_config)
+
+  def _req_iterator(self, input_iterator: Iterator[List[Input]]):
+    for inputs in input_iterator:
+      yield service_pb2.PostModelOutputsRequest(
+          user_app_id=self.user_app_id,
+          model_id=self.id,
+          version_id=self.model_version.id,
+          inputs=inputs,
+          model=self.model_info)
+
+  def stream(self,
+             inputs: Iterator[List[Input]],
+             inference_params: Dict = {},
+             output_config: Dict = {}):
+    """Generate the stream output on model based on the given stream of inputs.
+
+    Args:
+        inputs (Iterator[list[Input]]): stream of inputs to predict, must be less than 128.
+    """
+    # if not isinstance(inputs, Iterator[List[Input]]):
+    #   raise UserError('Invalid inputs, inputs must be a iterator of list of Input objects.')
+
+    self._override_model_version(inference_params, output_config)
+    request = self._req_iterator(inputs)
+
+    start_time = time.time()
+    backoff_iterator = BackoffIterator(10)
+    generation_started = False
+    while True:
+      if generation_started:
+        break
+      stream_response = self._grpc_request(self.STUB.StreamModelOutputs, request)
+      for response in stream_response:
+        if response.status.code == status_code_pb2.MODEL_DEPLOYING and \
+                time.time() - start_time < 60 * 10:
+          self.logger.info(f"{self.id} model is still deploying, please wait...")
+          time.sleep(next(backoff_iterator))
+          break
+        if response.status.code != status_code_pb2.SUCCESS:
+          raise Exception(f"Model Predict failed with response {response.status!r}")
+        else:
+          if not generation_started:
+            generation_started = True
+          yield response
+
+  def stream_by_filepath(self,
+                         filepath: str,
+                         input_type: str,
+                         inference_params: Dict = {},
+                         output_config: Dict = {}):
+    """Stream the model output based on the given filepath.
+
+    Args:
+        filepath (str): The filepath to predict.
+        input_type (str): The type of input. Can be 'image', 'text', 'video' or 'audio.
+        inference_params (dict): The inference params to override.
+        output_config (dict): The output config to override.
+          min_value (float): The minimum value of the prediction confidence to filter.
+          max_concepts (int): The maximum number of concepts to return.
+          select_concepts (list[Concept]): The concepts to select.
+
+    Example:
+        >>> from clarifai.client.model import Model
+        >>> model = Model("url")
+        >>> stream_response = model.stream_by_filepath('/path/to/image.jpg', 'image')
+        >>> list_stream_response = [response for response in stream_response]
+    """
+    if not os.path.isfile(filepath):
+      raise UserError('Invalid filepath.')
+
+    with open(filepath, "rb") as f:
+      file_bytes = f.read()
+
+    return self.stream_by_bytes(iter([file_bytes]), input_type, inference_params, output_config)
+
+  def stream_by_bytes(self,
+                      input_bytes_iterator: Iterator[bytes],
+                      input_type: str,
+                      inference_params: Dict = {},
+                      output_config: Dict = {}):
+    """Stream the model output based on the given bytes.
+
+    Args:
+        input_bytes_iterator (Iterator[bytes]): Iterator of file bytes to predict on.
+        input_type (str): The type of input. Can be 'image', 'text', 'video' or 'audio.
+        inference_params (dict): The inference params to override.
+        output_config (dict): The output config to override.
+          min_value (float): The minimum value of the prediction confidence to filter.
+          max_concepts (int): The maximum number of concepts to return.
+          select_concepts (list[Concept]): The concepts to select.
+
+    Example:
+        >>> from clarifai.client.model import Model
+        >>> model = Model("https://clarifai.com/openai/chat-completion/models/GPT-4")
+        >>> stream_response = model.stream_by_bytes(iter([b'Write a tweet on future of AI']),
+                                                    input_type='text',
+                                                    inference_params=dict(temperature=str(0.7), max_tokens=30)))
+        >>> list_stream_response = [response for response in stream_response]
+    """
+    if input_type not in {'image', 'text', 'video', 'audio'}:
+      raise UserError(
+          f"Got input type {input_type} but expected one of image, text, video, audio.")
+
+    def input_generator():
+      for input_bytes in input_bytes_iterator:
+        if input_type == "image":
+          yield [Inputs.get_input_from_bytes("", image_bytes=input_bytes)]
+        elif input_type == "text":
+          yield [Inputs.get_input_from_bytes("", text_bytes=input_bytes)]
+        elif input_type == "video":
+          yield [Inputs.get_input_from_bytes("", video_bytes=input_bytes)]
+        elif input_type == "audio":
+          yield [Inputs.get_input_from_bytes("", audio_bytes=input_bytes)]
+
+    return self.stream(input_generator(), inference_params, output_config)
+
+  def stream_by_url(self,
+                    url_iterator: Iterator[str],
+                    input_type: str,
+                    inference_params: Dict = {},
+                    output_config: Dict = {}):
+    """Stream the model output based on the given URL.
+
+    Args:
+        url_iterator (Iterator[str]): Iterator of URLs to predict.
+        input_type (str): The type of input. Can be 'image', 'text', 'video' or 'audio.
+        inference_params (dict): The inference params to override.
+        output_config (dict): The output config to override.
+          min_value (float): The minimum value of the prediction confidence to filter.
+          max_concepts (int): The maximum number of concepts to return.
+          select_concepts (list[Concept]): The concepts to select.
+
+    Example:
+        >>> from clarifai.client.model import Model
+        >>> model = Model("url")
+        >>> stream_response = model.stream_by_url(iter(['url']), 'image')
+        >>> list_stream_response = [response for response in stream_response]
+    """
+    if input_type not in {'image', 'text', 'video', 'audio'}:
+      raise UserError(
+          f"Got input type {input_type} but expected one of image, text, video, audio.")
+
+    def input_generator():
+      for url in url_iterator:
+        if input_type == "image":
+          yield [Inputs.get_input_from_url("", image_url=url)]
+        elif input_type == "text":
+          yield [Inputs.get_input_from_url("", text_url=url)]
+        elif input_type == "video":
+          yield [Inputs.get_input_from_url("", video_url=url)]
+        elif input_type == "audio":
+          yield [Inputs.get_input_from_url("", audio_url=url)]
+
+    return self.stream(input_generator(), inference_params, output_config)
+
   def _override_model_version(self, inference_params: Dict = {}, output_config: Dict = {}) -> None:
     """Overrides the model version.
 
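Taken together, the new methods give the Model client server-side streaming (generate*) and request-stream/response-stream calls (stream*). A usage sketch based on the docstrings above; the model URL, sample inputs, and PAT are placeholders and authentication (e.g. CLARIFAI_PAT) is assumed:

from clarifai.client.model import Model

model = Model("https://clarifai.com/openai/chat-completion/models/GPT-4")  # example URL from the docstrings

# One request in, a stream of partial responses back.
for response in model.generate_by_bytes(
    b"Write a tweet on future of AI",
    input_type="text",
    inference_params=dict(temperature=str(0.7), max_tokens=30)):
  print(response.status.code)

# An iterator of inputs in, a stream of responses back.
stream_response = model.stream_by_url(iter(["https://example.com/image.jpg"]), "image")  # placeholder URL
list_stream_response = [response for response in stream_response]
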
clarifai/models/model_serving/cli/login.py CHANGED
@@ -1,6 +1,6 @@
 import argparse
 
-from ..constants import CLARIFAI_PAT_PATH
+from clarifai.utils.constants import CLARIFAI_PAT_PATH
 from ..utils import _persist_pat
 from .base import BaseClarifaiCli
 
clarifai/models/model_serving/cli/upload.py CHANGED
@@ -97,11 +97,15 @@ class UploadModelSubCli(BaseClarifaiCli):
     self.file = args.file
     self.url = args.url
     if self.file:
-      assert self.url, ValueError("Provide either file or url, but got both.")
+      assert not self.url, ValueError("Expected either file or url, not both.")
       assert os.path.exists(self.file), FileNotFoundError
     elif self.url:
-      assert self.url.startswith("http") or self.url.startswith(
-          "s3"), f"Invalid url supported http or s3 url. Got {self.url}"
+      if len(self.url.split(":")) == 1:
+        # if URL has no scheme, default to https
+        self.url = f"https://{self.url}"
+      assert self.url.startswith("http") or self.url.startswith("https") or self.url.startswith(
+          "s3"
+      ), f"Invalid URL scheme, supported schemes are 'http', 'https', or 's3'. Got {self.url}"
       self.file = None
     else:
       for _fname in os.listdir(working_dir_or_config):
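
The upload check above now tolerates scheme-less checkpoint URLs by defaulting them to https. A standalone sketch of that normalization (the function name is illustrative, not part of the package):

def normalize_url(url: str) -> str:
  # A URL without ':' has no scheme, so default to https, mirroring the CLI change above.
  if len(url.split(":")) == 1:
    url = f"https://{url}"
  assert url.startswith(("http", "https", "s3")), (
      f"Invalid URL scheme, supported schemes are 'http', 'https', or 's3'. Got {url}")
  return url

print(normalize_url("huggingface.co/some/model"))  # -> https://huggingface.co/some/model
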
clarifai/models/model_serving/constants.py CHANGED
@@ -1,20 +1,13 @@
 import os
 
+from clarifai.utils.constants import CLARIFAI_HOME
+
 MAX_HW_DIM = 1024
 IMAGE_TENSOR_NAME = "image"
 TEXT_TENSOR_NAME = "text"
 
 BUILT_MODEL_EXT = ".clarifai"
 
-default_home = os.path.join(os.path.expanduser("~"), ".cache")
-CLARIFAI_HOME = os.path.expanduser(
-    os.getenv(
-        "CLARIFAI_HOME",
-        os.path.join(os.getenv("XDG_CACHE_HOME", default_home), "clarifai"),
-    ))
-os.makedirs(CLARIFAI_HOME, exist_ok=True)
-CLARIFAI_PAT_PATH = os.path.join(CLARIFAI_HOME, "pat")
-
 CLARIFAI_EXAMPLES_REPO = "https://github.com/Clarifai/examples.git"
 repo_name = CLARIFAI_EXAMPLES_REPO.split("/")[-1].replace(".git", "")
 CLARIFAI_EXAMPLES_REPO_PATH = os.path.join(CLARIFAI_HOME, repo_name)
clarifai/models/model_serving/utils.py CHANGED
@@ -1,6 +1,6 @@
 import os
 
-from .constants import CLARIFAI_PAT_PATH
+from clarifai.utils.constants import CLARIFAI_PAT_ENV_VAR, CLARIFAI_PAT_PATH
 
 
 def _persist_pat(pat: str):
@@ -10,10 +10,17 @@ def _persist_pat(pat: str):
 
 
 def _read_pat():
-  if not os.path.exists(CLARIFAI_PAT_PATH):
+  if not os.path.exists(CLARIFAI_PAT_PATH) and not os.environ.get(CLARIFAI_PAT_ENV_VAR, ""):
    return None
-  with open(CLARIFAI_PAT_PATH, "r") as f:
-    return f.read().replace("\n", "").replace("\r", "").strip()
+  if os.path.exists(CLARIFAI_PAT_PATH):
+    with open(CLARIFAI_PAT_PATH, "r") as f:
+      return f.read().replace("\n", "").replace("\r", "").strip()
+  elif os.environ.get(CLARIFAI_PAT_ENV_VAR):
+    return os.environ.get(CLARIFAI_PAT_ENV_VAR)
+  else:
+    raise ValueError(
+        f"PAT not found, please run `clarifai login` to persist your PAT or set it as an environment variable under the name '{CLARIFAI_PAT_ENV_VAR}'"
+    )
 
 
 def login(pat=None):
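
With this change the PAT can come from the file written by `clarifai login` or, failing that, from the CLARIFAI_PAT environment variable. A hedged sketch of the environment-variable path, assuming the hunk above belongs to clarifai/models/model_serving/utils.py as the RECORD section suggests (the PAT value is a placeholder):

import os

os.environ["CLARIFAI_PAT"] = "my-pat"  # placeholder; normally set outside the process

from clarifai.models.model_serving.utils import _read_pat

# Returns the file contents if CLARIFAI_PAT_PATH exists, otherwise falls back to CLARIFAI_PAT.
print(_read_pat())
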
clarifai/utils/constants.py ADDED
@@ -0,0 +1,12 @@
+import os
+
+USER_CACHE_DIR = os.path.join(os.path.expanduser("~"), ".cache")
+CLARIFAI_HOME = os.path.expanduser(
+    os.getenv(
+        "CLARIFAI_HOME",
+        os.path.join(os.getenv("XDG_CACHE_HOME", USER_CACHE_DIR), "clarifai"),
+    ))
+os.makedirs(CLARIFAI_HOME, exist_ok=True)
+CLARIFAI_PAT_PATH = os.path.join(CLARIFAI_HOME, "pat")
+CLARIFAI_PAT_ENV_VAR = "CLARIFAI_PAT"
+CLARIFAI_SESSION_TOKEN_ENV_VAR = "CLARIFAI_SESSION_TOKEN"
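
The new module centralizes the cache-directory and credential constants that previously lived in model_serving/constants.py. A short sketch of inspecting the resolved paths (output depends on the CLARIFAI_HOME / XDG_CACHE_HOME environment variables):

from clarifai.utils.constants import (CLARIFAI_HOME, CLARIFAI_PAT_ENV_VAR, CLARIFAI_PAT_PATH,
                                      CLARIFAI_SESSION_TOKEN_ENV_VAR)

print(CLARIFAI_HOME)      # defaults to ~/.cache/clarifai; the directory is created on import
print(CLARIFAI_PAT_PATH)  # <CLARIFAI_HOME>/pat, the file written by `clarifai login`
print(CLARIFAI_PAT_ENV_VAR, CLARIFAI_SESSION_TOKEN_ENV_VAR)
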
clarifai/versions.py CHANGED
@@ -1,6 +1,6 @@
 import os
 
-CLIENT_VERSION = "10.5.0"
+CLIENT_VERSION = "10.5.1"
 OS_VER = os.sys.platform
 PYTHON_VERSION = '.'.join(
     map(str, [os.sys.version_info.major, os.sys.version_info.minor, os.sys.version_info.micro]))
clarifai-10.5.1.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: clarifai
-Version: 10.5.0
+Version: 10.5.1
 Summary: Clarifai Python SDK
 Home-page: https://github.com/Clarifai/clarifai-python
 Author: Clarifai
clarifai-10.5.1.dist-info/RECORD CHANGED
@@ -1,22 +1,22 @@
 clarifai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/cli.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/errors.py,sha256=RwzTajwds51wLD0MVlMC5kcpBnzRpreDLlazPSBZxrg,2605
-clarifai/versions.py,sha256=45yN_CYVHYMj9_5f2wK7IgHdCYaA5KHH9NBdC2-5rj0,186
+clarifai/versions.py,sha256=tIgKRwGSVz7SowiYM6ZS42kkJr2EJxVJ-4slCa28Dt0,186
 clarifai/client/__init__.py,sha256=xI1U0l5AZdRThvQAXCLsd9axxyFzXXJ22m8LHqVjQRU,662
-clarifai/client/app.py,sha256=LC3rnuqr97f-S7LsJ9Q7KZnYMBHGDAq4mcFjjcPjpqo,27240
-clarifai/client/base.py,sha256=FrnSY9tSxjTxhABfBzQz5-PEppWMPbIyvSNnx8mVz8s,6919
+clarifai/client/app.py,sha256=br3C2paGmWzKu4eG0kUN1m8kDPKqpGPq28f7Xiy2v54,27329
+clarifai/client/base.py,sha256=wStnn_gS6sYo36OlYRzEkOFShXOQg3DKUp8i6DomAxQ,7178
 clarifai/client/dataset.py,sha256=XX-J-9Ict1CQrEycq-JbdxUTuucSgLeDSvnlHE1ucQY,29903
 clarifai/client/input.py,sha256=Av_gPrmwa1vorDs5Pz9jUbY1MwXHYFb3NyF_a1S1aII,41630
 clarifai/client/lister.py,sha256=03KGMvs5RVyYqxLsSrWhNc34I8kiF1Ph0NeyEwu7nMU,2082
-clarifai/client/model.py,sha256=QTVSeR3D3SHh8rK6kWHATy87qS8khYeXIKbs9dn5W4I,58487
+clarifai/client/model.py,sha256=Q0XEOWaZvSFPx7cLp4xJcwV5SVD1iU_6-DdDJmF-Hfk,72623
 clarifai/client/module.py,sha256=360JaOasX0DZCNE_Trj0LNTr-T_tUDZLfGpz0CdIi78,4248
 clarifai/client/search.py,sha256=iwZqwuEodbjIOEPMIjpic8caFGg3u51RK816pr-574o,14964
 clarifai/client/user.py,sha256=EQTeotfYTNedGcbTICYOUJqKgWhfVHvaMRTJ1hdoIdQ,10372
 clarifai/client/workflow.py,sha256=e3axkhU6c6WcxK9P5tgmnV464k-afslSzsSXx6nSMgA,10560
 clarifai/client/auth/__init__.py,sha256=7EwR0NrozkAUwpUnCsqXvE_p0wqx_SelXlSpKShKJK0,136
-clarifai/client/auth/helper.py,sha256=Wa5gkX0dl0xBwrT0E10lv9_Eoeh-WrMZR0Qc0YcV7es,14225
+clarifai/client/auth/helper.py,sha256=5aH2OjiWyuJk2K-1mfvXaD_OxEC014JEilUCpTwzCuY,14081
 clarifai/client/auth/register.py,sha256=2CMdBsoVLoTfjyksE6j7BM2tiEc73WKYvxnwDDgNn1k,536
-clarifai/client/auth/stub.py,sha256=KIzJZ8aRB1RzXJeWHDAx19HNdBsblPPHwYLfAkgI3rY,3779
+clarifai/client/auth/stub.py,sha256=xy4-fV0W8keCgXld4eOVzFQEIKxOktNwtL5bLztReug,4940
 clarifai/constants/dataset.py,sha256=OXYirr0iaoN_47V6wxO0H6ptV81y8zNGapPBz9qqD8o,516
 clarifai/constants/input.py,sha256=WcHwToUVIK9ItAhDefaSohQHCLNeR55PSjZ0BFnoZ3U,28
 clarifai/constants/model.py,sha256=oTad43ncskVHfQ9vEbL2yy0Fac666dXr7QuO8zZXHAE,245
@@ -42,8 +42,8 @@ clarifai/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/models/api.py,sha256=d3FQQlG0mNDLrfEvchqaVcq4Tgb_TqryNnJtwp3c7sE,10961
 clarifai/models/model_serving/README.md,sha256=zXnKybVoIF_LYHKKY2vijTCaGcb2-GJ5kef2uB1WFrs,4241
 clarifai/models/model_serving/__init__.py,sha256=78fiK9LvdGvpMxICmZWqSIyS6BFATjW2s5R6_GgtbPA,645
-clarifai/models/model_serving/constants.py,sha256=uoi8TqEFkdsHhSZu90HOO3R0BmPC3G0z9qA5ER-5H7w,688
-clarifai/models/model_serving/utils.py,sha256=D2UZo90Afd9f7OGKXdjRqys_6N26psY018V7R-rcNO4,629
+clarifai/models/model_serving/constants.py,sha256=VeSkzXijU9E0axBUbGs4pQG9V7lA-G8bYVVfxrPgB9U,419
+clarifai/models/model_serving/utils.py,sha256=zbFKpQ3Xom-zvNP9bqobIxXFdlCbxBRuGr7RFlg334Y,1038
 clarifai/models/model_serving/cli/__init__.py,sha256=Nls28G-fedNw2oQZIkPQSN__TgjJXbG9RDzzuHIM0VI,575
 clarifai/models/model_serving/cli/_utils.py,sha256=CZTKKiaoO1Mg5MKQS2Qhgy4JRjnkEHqy8zY5U6b6C0w,1734
 clarifai/models/model_serving/cli/base.py,sha256=k4ARNU1koNzGAi9ach6Vpk7hpISZySiYHyKjkBLuHLg,283
@@ -51,8 +51,8 @@ clarifai/models/model_serving/cli/build.py,sha256=-C4PBt-9xO9YsyUagz3kF4J0_PsYb6
 clarifai/models/model_serving/cli/clarifai_clis.py,sha256=sGDDj7MrlU3goWLQm4H9dCf4lPD2Ojx50_jdIoxb5QM,663
 clarifai/models/model_serving/cli/create.py,sha256=wtKcVi8XSPN-Fx0RrSUxEwH1hm5TbZ_FrCEMIS9yszM,5598
 clarifai/models/model_serving/cli/example_cli.py,sha256=tCm0J4EI0kuuSRhEiPTuraSA-bUYwtEFEHcL1eOXzRI,1039
-clarifai/models/model_serving/cli/login.py,sha256=TYRQALJZUhNvtx2VcChO0y41YXs8-yP9BrShYb9tcOM,743
-clarifai/models/model_serving/cli/upload.py,sha256=aLq5Fjc2av0-MRnE6lSWmBYxvm5yseIekWLHGXAhsb8,6813
+clarifai/models/model_serving/cli/login.py,sha256=yNHedXKqqw_fxECaEDrWKPLCvOgGdHjNQv3MuRZpMSg,756
+clarifai/models/model_serving/cli/upload.py,sha256=kOz8OOEobo6sLUkS1xg0672PTmMkx0aWxjKMhSRlMwM,7013
 clarifai/models/model_serving/docs/cli.md,sha256=fLgyY8sYMPjYQW_q8Q9yJYB_ryDVGbzj2VouJgvkEFw,4564
 clarifai/models/model_serving/docs/concepts.md,sha256=ppQADibKQInf9JpfcH7wIpcMndTZ3618or5yzMhGNOE,9376
 clarifai/models/model_serving/docs/dependencies.md,sha256=apwg_IxDBzovtQYXRpWMU9pUqdf0VaS10yMVOYYXhoc,728
@@ -95,6 +95,7 @@ clarifai/rag/utils.py,sha256=yr1jAcbpws4vFGBqlAwPPE7v1DRba48g8gixLFw8OhQ,4070
 clarifai/schema/search.py,sha256=JjTi8ammJgZZ2OGl4K6tIA4zEJ1Fr2ASZARXavI1j5c,2448
 clarifai/urls/helper.py,sha256=tjoMGGHuWX68DUB0pk4MEjrmFsClUAQj2jmVEM_Sy78,4751
 clarifai/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+clarifai/utils/constants.py,sha256=SJ6FzfvtA2Pe3QxKtNnnxxAbvetbh7v3wGCTXip7z_0,431
 clarifai/utils/logging.py,sha256=xJTteoUodQ7RfsbO676QgidKa5EVPbdUu89Xlwwso2s,4533
 clarifai/utils/misc.py,sha256=GznzquXXFt8J9qzMWtTJPFWCSc5QTs_ZBldW1mXCZzE,1285
 clarifai/utils/model_train.py,sha256=Mndqy5GNu7kjQHjDyNVyamL0hQFLGSHcWhOuPyOvr1w,8005
@@ -106,9 +107,9 @@ clarifai/workflows/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuF
 clarifai/workflows/export.py,sha256=vICRhIreqDSShxLKjHNM2JwzKsf1B4fdXB0ciMcA70k,1945
 clarifai/workflows/utils.py,sha256=nGeB_yjVgUO9kOeKTg4OBBaBz-AwXI3m-huSVj-9W18,1924
 clarifai/workflows/validate.py,sha256=yJq03MaJqi5AK3alKGJJBR89xmmjAQ31sVufJUiOqY8,2556
-clarifai-10.5.0.dist-info/LICENSE,sha256=mUqF_d12-qE2n41g7C5_sq-BMLOcj6CNN-jevr15YHU,555
-clarifai-10.5.0.dist-info/METADATA,sha256=XR2mF-8uqgbOp-nJXh97xrzITiszzpnq0D7xffqUryM,19372
-clarifai-10.5.0.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
-clarifai-10.5.0.dist-info/entry_points.txt,sha256=qZOr_MIPG0dBBE1zringDJS_wXNGTAA_SQ-zcbmDHOw,82
-clarifai-10.5.0.dist-info/top_level.txt,sha256=wUMdCQGjkxaynZ6nZ9FAnvBUCgp5RJUVFSy2j-KYo0s,9
-clarifai-10.5.0.dist-info/RECORD,,
+clarifai-10.5.1.dist-info/LICENSE,sha256=mUqF_d12-qE2n41g7C5_sq-BMLOcj6CNN-jevr15YHU,555
+clarifai-10.5.1.dist-info/METADATA,sha256=f1Ga6-33hEMjW56xOUGAeZIELIEJx_m6HOYj2p9ni78,19372
+clarifai-10.5.1.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+clarifai-10.5.1.dist-info/entry_points.txt,sha256=qZOr_MIPG0dBBE1zringDJS_wXNGTAA_SQ-zcbmDHOw,82
+clarifai-10.5.1.dist-info/top_level.txt,sha256=wUMdCQGjkxaynZ6nZ9FAnvBUCgp5RJUVFSy2j-KYo0s,9
+clarifai-10.5.1.dist-info/RECORD,,