clarifai 10.5.0__tar.gz → 10.5.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (137)
  1. {clarifai-10.5.0/clarifai.egg-info → clarifai-10.5.1}/PKG-INFO +1 -1
  2. clarifai-10.5.1/VERSION +1 -0
  3. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/client/app.py +4 -0
  4. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/client/auth/helper.py +0 -3
  5. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/client/auth/stub.py +32 -4
  6. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/client/base.py +8 -2
  7. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/client/model.py +310 -1
  8. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/cli/login.py +1 -1
  9. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/cli/upload.py +7 -3
  10. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/constants.py +2 -9
  11. clarifai-10.5.1/clarifai/models/model_serving/utils.py +30 -0
  12. clarifai-10.5.1/clarifai/utils/constants.py +12 -0
  13. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/versions.py +1 -1
  14. {clarifai-10.5.0 → clarifai-10.5.1/clarifai.egg-info}/PKG-INFO +1 -1
  15. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai.egg-info/SOURCES.txt +1 -0
  16. {clarifai-10.5.0 → clarifai-10.5.1}/tests/test_auth.py +0 -8
  17. {clarifai-10.5.0 → clarifai-10.5.1}/tests/test_search.py +3 -3
  18. clarifai-10.5.0/VERSION +0 -1
  19. clarifai-10.5.0/clarifai/models/model_serving/utils.py +0 -23
  20. {clarifai-10.5.0 → clarifai-10.5.1}/LICENSE +0 -0
  21. {clarifai-10.5.0 → clarifai-10.5.1}/MANIFEST.in +0 -0
  22. {clarifai-10.5.0 → clarifai-10.5.1}/README.md +0 -0
  23. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/__init__.py +0 -0
  24. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/cli.py +0 -0
  25. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/client/__init__.py +0 -0
  26. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/client/auth/__init__.py +0 -0
  27. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/client/auth/register.py +0 -0
  28. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/client/dataset.py +0 -0
  29. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/client/input.py +0 -0
  30. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/client/lister.py +0 -0
  31. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/client/module.py +0 -0
  32. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/client/search.py +0 -0
  33. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/client/user.py +0 -0
  34. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/client/workflow.py +0 -0
  35. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/constants/dataset.py +0 -0
  36. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/constants/input.py +0 -0
  37. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/constants/model.py +0 -0
  38. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/constants/rag.py +0 -0
  39. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/constants/search.py +0 -0
  40. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/constants/workflow.py +0 -0
  41. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/datasets/__init__.py +0 -0
  42. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/datasets/export/__init__.py +0 -0
  43. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/datasets/export/inputs_annotations.py +0 -0
  44. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/datasets/upload/__init__.py +0 -0
  45. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/datasets/upload/base.py +0 -0
  46. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/datasets/upload/features.py +0 -0
  47. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/datasets/upload/image.py +0 -0
  48. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/datasets/upload/loaders/README.md +0 -0
  49. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/datasets/upload/loaders/__init__.py +0 -0
  50. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/datasets/upload/loaders/coco_captions.py +0 -0
  51. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/datasets/upload/loaders/coco_detection.py +0 -0
  52. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/datasets/upload/loaders/imagenet_classification.py +0 -0
  53. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/datasets/upload/loaders/xview_detection.py +0 -0
  54. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/datasets/upload/text.py +0 -0
  55. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/datasets/upload/utils.py +0 -0
  56. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/errors.py +0 -0
  57. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/__init__.py +0 -0
  58. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/api.py +0 -0
  59. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/README.md +0 -0
  60. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/__init__.py +0 -0
  61. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/cli/__init__.py +0 -0
  62. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/cli/_utils.py +0 -0
  63. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/cli/base.py +0 -0
  64. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/cli/build.py +0 -0
  65. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/cli/clarifai_clis.py +0 -0
  66. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/cli/create.py +0 -0
  67. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/cli/example_cli.py +0 -0
  68. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/docs/cli.md +0 -0
  69. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/docs/concepts.md +0 -0
  70. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/docs/dependencies.md +0 -0
  71. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/docs/inference_parameters.md +0 -0
  72. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/docs/model_types.md +0 -0
  73. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/model_config/__init__.py +0 -0
  74. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/model_config/base.py +0 -0
  75. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/model_config/config.py +0 -0
  76. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/model_config/inference_parameter.py +0 -0
  77. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +0 -0
  78. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/model_config/model_types_config/text-classifier.yaml +0 -0
  79. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/model_config/model_types_config/text-embedder.yaml +0 -0
  80. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml +0 -0
  81. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml +0 -0
  82. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml +0 -0
  83. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml +0 -0
  84. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml +0 -0
  85. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +0 -0
  86. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/model_config/output.py +0 -0
  87. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/model_config/triton/__init__.py +0 -0
  88. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/model_config/triton/serializer.py +0 -0
  89. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/model_config/triton/triton_config.py +0 -0
  90. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/model_config/triton/wrappers.py +0 -0
  91. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/repo_build/__init__.py +0 -0
  92. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/repo_build/build.py +0 -0
  93. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/repo_build/static_files/_requirements.txt +0 -0
  94. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/repo_build/static_files/base_test.py +0 -0
  95. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/repo_build/static_files/inference.py +0 -0
  96. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/repo_build/static_files/sample_clarifai_config.yaml +0 -0
  97. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/repo_build/static_files/test.py +0 -0
  98. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/repo_build/static_files/triton/model.py +0 -0
  99. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/modules/README.md +0 -0
  100. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/modules/__init__.py +0 -0
  101. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/modules/css.py +0 -0
  102. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/modules/pages.py +0 -0
  103. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/modules/style.css +0 -0
  104. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/rag/__init__.py +0 -0
  105. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/rag/rag.py +0 -0
  106. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/rag/utils.py +0 -0
  107. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/schema/search.py +0 -0
  108. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/urls/helper.py +0 -0
  109. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/utils/__init__.py +0 -0
  110. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/utils/evaluation/__init__.py +0 -0
  111. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/utils/evaluation/helpers.py +0 -0
  112. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/utils/evaluation/main.py +0 -0
  113. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/utils/evaluation/testset_annotation_parser.py +0 -0
  114. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/utils/logging.py +0 -0
  115. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/utils/misc.py +0 -0
  116. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/utils/model_train.py +0 -0
  117. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/workflows/__init__.py +0 -0
  118. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/workflows/export.py +0 -0
  119. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/workflows/utils.py +0 -0
  120. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai/workflows/validate.py +0 -0
  121. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai.egg-info/dependency_links.txt +0 -0
  122. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai.egg-info/entry_points.txt +0 -0
  123. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai.egg-info/requires.txt +0 -0
  124. {clarifai-10.5.0 → clarifai-10.5.1}/clarifai.egg-info/top_level.txt +0 -0
  125. {clarifai-10.5.0 → clarifai-10.5.1}/pyproject.toml +0 -0
  126. {clarifai-10.5.0 → clarifai-10.5.1}/requirements.txt +0 -0
  127. {clarifai-10.5.0 → clarifai-10.5.1}/setup.cfg +0 -0
  128. {clarifai-10.5.0 → clarifai-10.5.1}/setup.py +0 -0
  129. {clarifai-10.5.0 → clarifai-10.5.1}/tests/test_app.py +0 -0
  130. {clarifai-10.5.0 → clarifai-10.5.1}/tests/test_data_upload.py +0 -0
  131. {clarifai-10.5.0 → clarifai-10.5.1}/tests/test_eval.py +0 -0
  132. {clarifai-10.5.0 → clarifai-10.5.1}/tests/test_misc.py +0 -0
  133. {clarifai-10.5.0 → clarifai-10.5.1}/tests/test_model_predict.py +0 -0
  134. {clarifai-10.5.0 → clarifai-10.5.1}/tests/test_model_train.py +0 -0
  135. {clarifai-10.5.0 → clarifai-10.5.1}/tests/test_modules.py +0 -0
  136. {clarifai-10.5.0 → clarifai-10.5.1}/tests/test_rag.py +0 -0
  137. {clarifai-10.5.0 → clarifai-10.5.1}/tests/test_stub.py +0 -0
{clarifai-10.5.0/clarifai.egg-info → clarifai-10.5.1}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: clarifai
- Version: 10.5.0
+ Version: 10.5.1
  Summary: Clarifai Python SDK
  Home-page: https://github.com/Clarifai/clarifai-python
  Author: Clarifai
clarifai-10.5.1/VERSION
@@ -0,0 +1 @@
+ 10.5.1
{clarifai-10.5.0 → clarifai-10.5.1}/clarifai/client/app.py
@@ -30,6 +30,7 @@ class App(Lister, BaseClient):
  def __init__(self,
  url: str = None,
  app_id: str = None,
+ user_id: str = None,
  base_url: str = "https://api.clarifai.com",
  pat: str = None,
  token: str = None,
@@ -53,6 +54,9 @@ class App(Lister, BaseClient):
  if url:
  user_id, app_id = ClarifaiUrlHelper.split_clarifai_app_url(url)
  kwargs = {'user_id': user_id}
+ if user_id:
+ kwargs = {'user_id': user_id}
+
  self.kwargs = {**kwargs, 'id': app_id}
  self.app_info = resources_pb2.App(**self.kwargs)
  self.logger = get_logger(logger_level="INFO", name=__name__)
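The hunk above lets App take a user_id directly instead of deriving it from a URL. A minimal usage sketch, not part of the diff, with illustrative IDs and PAT (the PAT can also come from the CLARIFAI_PAT environment variable):

from clarifai.client.app import App

# Hypothetical IDs; with 10.5.1 the owning user can be named explicitly.
app = App(app_id="my-app", user_id="my-user", pat="my-pat")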
{clarifai-10.5.0 → clarifai-10.5.1}/clarifai/client/auth/helper.py
@@ -105,9 +105,6 @@ class ClarifaiAuthHelper:
  if self.user_id == "":
  raise Exception(
  "Need 'user_id' to not be empty in the query params or user CLARIFAI_USER_ID env var")
- if self.app_id == "":
- raise Exception(
- "Need 'app_id' to not be empty in the query params or user CLARIFAI_APP_ID env var")
  if self._pat != "" and self._token != "":
  raise Exception(
  "A personal access token OR a session token need to be provided, but you cannot provide both."
{clarifai-10.5.0 → clarifai-10.5.1}/clarifai/client/auth/stub.py
@@ -1,3 +1,4 @@
+ import itertools
  import logging
  import time
  from concurrent.futures import ThreadPoolExecutor
@@ -20,6 +21,34 @@ retry_codes_grpc = {
  _threadpool = ThreadPoolExecutor(100)


+ def validate_response(response, attempt, max_attempts):
+ # Helper function to handle simple response validation
+ def handle_simple_response(response):
+ if hasattr(response, 'status') and hasattr(response.status, 'code'):
+ if (response.status.code in throttle_status_codes) and attempt < max_attempts:
+ logging.debug('Retrying with status %s' % str(response.status))
+ return None # Indicates a retry is needed
+ else:
+ return response
+
+ # Check if the response is an instance of a gRPC streaming call
+ if isinstance(response, grpc._channel._MultiThreadedRendezvous):
+ try:
+ # Check just the first response in the stream for validation
+ first_res = next(response)
+ validated_response = handle_simple_response(first_res)
+ if validated_response is not None:
+ # Have to return that first response and the rest of the stream.
+ return itertools.chain([validated_response], response)
+ return None # Indicates a retry is needed
+ except grpc.RpcError as e:
+ logging.error('Error processing streaming response: %s' % str(e))
+ return None # Indicates an error
+ else:
+ # Handle simple response validation
+ return handle_simple_response(response)
+
+
  def create_stub(auth_helper: ClarifaiAuthHelper = None, max_retry_attempts: int = 10) -> V2Stub:
  """
  Create client stub that handles authorization and basic retries for
@@ -109,10 +138,9 @@ class _RetryRpcCallable(RpcCallable):
  time.sleep(self.backoff_time) # TODO better backoff between attempts
  try:
  response = self.f(*args, **kwargs)
- if (response.status.code in throttle_status_codes) and attempt < self.max_attempts:
- logging.debug('Retrying with status %s' % str(response.status))
- else:
- return response
+ v = validate_response(response, attempt, self.max_attempts)
+ if v is not None:
+ return v
  except grpc.RpcError as e:
  if (e.code() in retry_codes_grpc) and attempt < self.max_attempts:
  logging.debug('Retrying with status %s' % e.code())
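validate_response extends the existing throttle-retry logic to server-streaming calls: for a gRPC stream it peeks at the first message and, if that message is healthy, splices it back onto the rest of the stream with itertools.chain. A short sketch of obtaining a retrying stub (not part of the diff; it assumes CLARIFAI_PAT and related variables are already set in the environment):

from clarifai.client.auth import create_stub
from clarifai.client.auth.helper import ClarifaiAuthHelper

auth = ClarifaiAuthHelper.from_env(validate=False)
stub = create_stub(auth, max_retry_attempts=5)
# Unary and streaming RPCs issued through this stub now share the same throttle/retry handling.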
{clarifai-10.5.0 → clarifai-10.5.1}/clarifai/client/base.py
@@ -8,6 +8,7 @@ from google.protobuf.wrappers_pb2 import BoolValue
  from clarifai.client.auth import create_stub
  from clarifai.client.auth.helper import ClarifaiAuthHelper
  from clarifai.errors import ApiError, UserError
+ from clarifai.utils.constants import CLARIFAI_PAT_ENV_VAR, CLARIFAI_SESSION_TOKEN_ENV_VAR
  from clarifai.utils.misc import get_from_dict_or_env


@@ -36,9 +37,9 @@ class BaseClient:
  def __init__(self, **kwargs):
  token, pat = "", ""
  try:
- pat = get_from_dict_or_env(key="pat", env_key="CLARIFAI_PAT", **kwargs)
+ pat = get_from_dict_or_env(key="pat", env_key=CLARIFAI_PAT_ENV_VAR, **kwargs)
  except UserError:
- token = get_from_dict_or_env(key="token", env_key="CLARIFAI_SESSION_TOKEN", **kwargs)
+ token = get_from_dict_or_env(key="token", env_key=CLARIFAI_SESSION_TOKEN_ENV_VAR, **kwargs)
  finally:
  assert token or pat, Exception(
  "Need 'pat' or 'token' in args or use one of the CLARIFAI_PAT or CLARIFAI_SESSION_TOKEN env vars"
@@ -54,6 +55,11 @@ class BaseClient:
  self.base = self.auth_helper.base
  self.root_certificates_path = self.auth_helper._root_certificates_path

+ @classmethod
+ def from_env(cls, validate: bool = False):
+ auth = ClarifaiAuthHelper.from_env(validate=validate)
+ return cls.from_auth_helper(auth)
+
  @classmethod
  def from_auth_helper(cls, auth: ClarifaiAuthHelper, **kwargs):
  default_kwargs = {
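base.py now reads credentials through the shared constants in clarifai/utils/constants.py and adds a from_env() constructor. A hedged sketch of building a client from the environment (this assumes User is a BaseClient subclass that inherits from_env; the IDs and PAT are illustrative):

import os
from clarifai.client.user import User

os.environ["CLARIFAI_USER_ID"] = "my-user"  # illustrative
os.environ["CLARIFAI_PAT"] = "my-pat"       # illustrative
user = User.from_env(validate=False)        # builds a ClarifaiAuthHelper from these env vars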
{clarifai-10.5.0 → clarifai-10.5.1}/clarifai/client/model.py
@@ -1,7 +1,7 @@
  import json
  import os
  import time
- from typing import Any, Dict, Generator, List, Tuple, Union
+ from typing import Any, Dict, Generator, Iterator, List, Tuple, Union

  import numpy as np
  import requests
@@ -553,6 +553,315 @@ class Model(Lister, BaseClient):
  return self.predict(
  inputs=[input_proto], inference_params=inference_params, output_config=output_config)

+ def generate(self, inputs: List[Input], inference_params: Dict = {}, output_config: Dict = {}):
+ """Generate the stream output on model based on the given inputs.
+
+ Args:
+ inputs (list[Input]): The inputs to predict, must be less than 128.
+ """
+ if not isinstance(inputs, list):
+ raise UserError('Invalid inputs, inputs must be a list of Input objects.')
+ if len(inputs) > MAX_MODEL_PREDICT_INPUTS:
+ raise UserError(f"Too many inputs. Max is {MAX_MODEL_PREDICT_INPUTS}."
+ ) # TODO Use Chunker for inputs len > 128
+
+ self._override_model_version(inference_params, output_config)
+ request = service_pb2.PostModelOutputsRequest(
+ user_app_id=self.user_app_id,
+ model_id=self.id,
+ version_id=self.model_version.id,
+ inputs=inputs,
+ model=self.model_info)
+
+ start_time = time.time()
+ backoff_iterator = BackoffIterator(10)
+ generation_started = False
+ while True:
+ if generation_started:
+ break
+ stream_response = self._grpc_request(self.STUB.GenerateModelOutputs, request)
+ for response in stream_response:
+ if response.status.code == status_code_pb2.MODEL_DEPLOYING and \
+ time.time() - start_time < 60 * 10:
+ self.logger.info(f"{self.id} model is still deploying, please wait...")
+ time.sleep(next(backoff_iterator))
+ break
+ if response.status.code != status_code_pb2.SUCCESS:
+ raise Exception(f"Model Predict failed with response {response.status!r}")
+ else:
+ if not generation_started:
+ generation_started = True
+ yield response
+
+ def generate_by_filepath(self,
+ filepath: str,
+ input_type: str,
+ inference_params: Dict = {},
+ output_config: Dict = {}):
+ """Generate the stream output on model based on the given filepath.
+
+ Args:
+ filepath (str): The filepath to predict.
+ input_type (str): The type of input. Can be 'image', 'text', 'video' or 'audio.
+ inference_params (dict): The inference params to override.
+ output_config (dict): The output config to override.
+ min_value (float): The minimum value of the prediction confidence to filter.
+ max_concepts (int): The maximum number of concepts to return.
+ select_concepts (list[Concept]): The concepts to select.
+
+ Example:
+ >>> from clarifai.client.model import Model
+ >>> model = Model("url") # Example URL: https://clarifai.com/clarifai/main/models/general-image-recognition
+ or
+ >>> model = Model(model_id='model_id', user_id='user_id', app_id='app_id')
+ >>> stream_response = model.generate_by_filepath('/path/to/image.jpg', 'image')
+ >>> stream_response = model.generate_by_filepath('/path/to/text.txt', 'text')
+ >>> list_stream_response = [response for response in stream_response]
+ """
+ if not os.path.isfile(filepath):
+ raise UserError('Invalid filepath.')
+
+ with open(filepath, "rb") as f:
+ file_bytes = f.read()
+
+ return self.generate_by_bytes(file_bytes, input_type, inference_params, output_config)
+
+ def generate_by_bytes(self,
+ input_bytes: bytes,
+ input_type: str,
+ inference_params: Dict = {},
+ output_config: Dict = {}):
+ """Generate the stream output on model based on the given bytes.
+
+ Args:
+ input_bytes (bytes): File Bytes to predict on.
+ input_type (str): The type of input. Can be 'image', 'text', 'video' or 'audio.
+ inference_params (dict): The inference params to override.
+ output_config (dict): The output config to override.
+ min_value (float): The minimum value of the prediction confidence to filter.
+ max_concepts (int): The maximum number of concepts to return.
+ select_concepts (list[Concept]): The concepts to select.
+
+ Example:
+ >>> from clarifai.client.model import Model
+ >>> model = Model("https://clarifai.com/openai/chat-completion/models/GPT-4")
+ >>> stream_response = model.generate_by_bytes(b'Write a tweet on future of AI',
+ input_type='text',
+ inference_params=dict(temperature=str(0.7), max_tokens=30)))
+ >>> list_stream_response = [response for response in stream_response]
+ """
+ if input_type not in {'image', 'text', 'video', 'audio'}:
+ raise UserError(
+ f"Got input type {input_type} but expected one of image, text, video, audio.")
+ if not isinstance(input_bytes, bytes):
+ raise UserError('Invalid bytes.')
+
+ if input_type == "image":
+ input_proto = Inputs.get_input_from_bytes("", image_bytes=input_bytes)
+ elif input_type == "text":
+ input_proto = Inputs.get_input_from_bytes("", text_bytes=input_bytes)
+ elif input_type == "video":
+ input_proto = Inputs.get_input_from_bytes("", video_bytes=input_bytes)
+ elif input_type == "audio":
+ input_proto = Inputs.get_input_from_bytes("", audio_bytes=input_bytes)
+
+ return self.generate(
+ inputs=[input_proto], inference_params=inference_params, output_config=output_config)
+
+ def generate_by_url(self,
+ url: str,
+ input_type: str,
+ inference_params: Dict = {},
+ output_config: Dict = {}):
+ """Generate the stream output on model based on the given URL.
+
+ Args:
+ url (str): The URL to predict.
+ input_type (str): The type of input. Can be 'image', 'text', 'video' or 'audio.
+ inference_params (dict): The inference params to override.
+ output_config (dict): The output config to override.
+ min_value (float): The minimum value of the prediction confidence to filter.
+ max_concepts (int): The maximum number of concepts to return.
+ select_concepts (list[Concept]): The concepts to select.
+
+ Example:
+ >>> from clarifai.client.model import Model
+ >>> model = Model("url") # Example URL: https://clarifai.com/clarifai/main/models/general-image-recognition
+ or
+ >>> model = Model(model_id='model_id', user_id='user_id', app_id='app_id')
+ >>> stream_response = model.generate_by_url('url', 'image')
+ >>> list_stream_response = [response for response in stream_response]
+ """
+ if input_type not in {'image', 'text', 'video', 'audio'}:
+ raise UserError(
+ f"Got input type {input_type} but expected one of image, text, video, audio.")
+
+ if input_type == "image":
+ input_proto = Inputs.get_input_from_url("", image_url=url)
+ elif input_type == "text":
+ input_proto = Inputs.get_input_from_url("", text_url=url)
+ elif input_type == "video":
+ input_proto = Inputs.get_input_from_url("", video_url=url)
+ elif input_type == "audio":
+ input_proto = Inputs.get_input_from_url("", audio_url=url)
+
+ return self.generate(
+ inputs=[input_proto], inference_params=inference_params, output_config=output_config)
+
+ def _req_iterator(self, input_iterator: Iterator[List[Input]]):
+ for inputs in input_iterator:
+ yield service_pb2.PostModelOutputsRequest(
+ user_app_id=self.user_app_id,
+ model_id=self.id,
+ version_id=self.model_version.id,
+ inputs=inputs,
+ model=self.model_info)
+
+ def stream(self,
+ inputs: Iterator[List[Input]],
+ inference_params: Dict = {},
+ output_config: Dict = {}):
+ """Generate the stream output on model based on the given stream of inputs.
+
+ Args:
+ inputs (Iterator[list[Input]]): stream of inputs to predict, must be less than 128.
+ """
+ # if not isinstance(inputs, Iterator[List[Input]]):
+ # raise UserError('Invalid inputs, inputs must be a iterator of list of Input objects.')
+
+ self._override_model_version(inference_params, output_config)
+ request = self._req_iterator(inputs)
+
+ start_time = time.time()
+ backoff_iterator = BackoffIterator(10)
+ generation_started = False
+ while True:
+ if generation_started:
+ break
+ stream_response = self._grpc_request(self.STUB.StreamModelOutputs, request)
+ for response in stream_response:
+ if response.status.code == status_code_pb2.MODEL_DEPLOYING and \
+ time.time() - start_time < 60 * 10:
+ self.logger.info(f"{self.id} model is still deploying, please wait...")
+ time.sleep(next(backoff_iterator))
+ break
+ if response.status.code != status_code_pb2.SUCCESS:
+ raise Exception(f"Model Predict failed with response {response.status!r}")
+ else:
+ if not generation_started:
+ generation_started = True
+ yield response
+
+ def stream_by_filepath(self,
+ filepath: str,
+ input_type: str,
+ inference_params: Dict = {},
+ output_config: Dict = {}):
+ """Stream the model output based on the given filepath.
+
+ Args:
+ filepath (str): The filepath to predict.
+ input_type (str): The type of input. Can be 'image', 'text', 'video' or 'audio.
+ inference_params (dict): The inference params to override.
+ output_config (dict): The output config to override.
+ min_value (float): The minimum value of the prediction confidence to filter.
+ max_concepts (int): The maximum number of concepts to return.
+ select_concepts (list[Concept]): The concepts to select.
+
+ Example:
+ >>> from clarifai.client.model import Model
+ >>> model = Model("url")
+ >>> stream_response = model.stream_by_filepath('/path/to/image.jpg', 'image')
+ >>> list_stream_response = [response for response in stream_response]
+ """
+ if not os.path.isfile(filepath):
+ raise UserError('Invalid filepath.')
+
+ with open(filepath, "rb") as f:
+ file_bytes = f.read()
+
+ return self.stream_by_bytes(iter([file_bytes]), input_type, inference_params, output_config)
+
+ def stream_by_bytes(self,
+ input_bytes_iterator: Iterator[bytes],
+ input_type: str,
+ inference_params: Dict = {},
+ output_config: Dict = {}):
+ """Stream the model output based on the given bytes.
+
+ Args:
+ input_bytes_iterator (Iterator[bytes]): Iterator of file bytes to predict on.
+ input_type (str): The type of input. Can be 'image', 'text', 'video' or 'audio.
+ inference_params (dict): The inference params to override.
+ output_config (dict): The output config to override.
+ min_value (float): The minimum value of the prediction confidence to filter.
+ max_concepts (int): The maximum number of concepts to return.
+ select_concepts (list[Concept]): The concepts to select.
+
+ Example:
+ >>> from clarifai.client.model import Model
+ >>> model = Model("https://clarifai.com/openai/chat-completion/models/GPT-4")
+ >>> stream_response = model.stream_by_bytes(iter([b'Write a tweet on future of AI']),
+ input_type='text',
+ inference_params=dict(temperature=str(0.7), max_tokens=30)))
+ >>> list_stream_response = [response for response in stream_response]
+ """
+ if input_type not in {'image', 'text', 'video', 'audio'}:
+ raise UserError(
+ f"Got input type {input_type} but expected one of image, text, video, audio.")
+
+ def input_generator():
+ for input_bytes in input_bytes_iterator:
+ if input_type == "image":
+ yield [Inputs.get_input_from_bytes("", image_bytes=input_bytes)]
+ elif input_type == "text":
+ yield [Inputs.get_input_from_bytes("", text_bytes=input_bytes)]
+ elif input_type == "video":
+ yield [Inputs.get_input_from_bytes("", video_bytes=input_bytes)]
+ elif input_type == "audio":
+ yield [Inputs.get_input_from_bytes("", audio_bytes=input_bytes)]
+
+ return self.stream(input_generator(), inference_params, output_config)
+
+ def stream_by_url(self,
+ url_iterator: Iterator[str],
+ input_type: str,
+ inference_params: Dict = {},
+ output_config: Dict = {}):
+ """Stream the model output based on the given URL.
+
+ Args:
+ url_iterator (Iterator[str]): Iterator of URLs to predict.
+ input_type (str): The type of input. Can be 'image', 'text', 'video' or 'audio.
+ inference_params (dict): The inference params to override.
+ output_config (dict): The output config to override.
+ min_value (float): The minimum value of the prediction confidence to filter.
+ max_concepts (int): The maximum number of concepts to return.
+ select_concepts (list[Concept]): The concepts to select.
+
+ Example:
+ >>> from clarifai.client.model import Model
+ >>> model = Model("url")
+ >>> stream_response = model.stream_by_url(iter(['url']), 'image')
+ >>> list_stream_response = [response for response in stream_response]
+ """
+ if input_type not in {'image', 'text', 'video', 'audio'}:
+ raise UserError(
+ f"Got input type {input_type} but expected one of image, text, video, audio.")
+
+ def input_generator():
+ for url in url_iterator:
+ if input_type == "image":
+ yield [Inputs.get_input_from_url("", image_url=url)]
+ elif input_type == "text":
+ yield [Inputs.get_input_from_url("", text_url=url)]
+ elif input_type == "video":
+ yield [Inputs.get_input_from_url("", video_url=url)]
+ elif input_type == "audio":
+ yield [Inputs.get_input_from_url("", audio_url=url)]
+
+ return self.stream(input_generator(), inference_params, output_config)
+
  def _override_model_version(self, inference_params: Dict = {}, output_config: Dict = {}) -> None:
  """Overrides the model version.

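A short usage sketch of the new streaming APIs, assembled from the docstrings above; the model URL, prompt, and output-field access are illustrative and assume a CLARIFAI_PAT is available in the environment:

from clarifai.client.model import Model

model = Model("https://clarifai.com/openai/chat-completion/models/GPT-4")

# Server-streamed generation from a single text input.
stream_response = model.generate_by_bytes(
    b"Write a tweet on future of AI",
    input_type="text",
    inference_params=dict(temperature=str(0.7), max_tokens=30))
for response in stream_response:
  print(response.outputs[0].data.text.raw)  # output field access is illustrative

# The stream() variants take an iterator of inputs and return a response stream.
results = list(model.stream_by_bytes(iter([b"Write a haiku about GPUs"]), input_type="text"))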
{clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/cli/login.py
@@ -1,6 +1,6 @@
  import argparse

- from ..constants import CLARIFAI_PAT_PATH
+ from clarifai.utils.constants import CLARIFAI_PAT_PATH
  from ..utils import _persist_pat
  from .base import BaseClarifaiCli

{clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/cli/upload.py
@@ -97,11 +97,15 @@ class UploadModelSubCli(BaseClarifaiCli):
  self.file = args.file
  self.url = args.url
  if self.file:
- assert self.url, ValueError("Provide either file or url, but got both.")
+ assert not self.url, ValueError("Expected either file or url, not both.")
  assert os.path.exists(self.file), FileNotFoundError
  elif self.url:
- assert self.url.startswith("http") or self.url.startswith(
- "s3"), f"Invalid url supported http or s3 url. Got {self.url}"
+ if len(self.url.split(":")) == 1:
+ # if URL has no scheme, default to https
+ self.url = f"https://{self.url}"
+ assert self.url.startswith("http") or self.url.startswith("https") or self.url.startswith(
+ "s3"
+ ), f"Invalid URL scheme, supported schemes are 'http', 'https', or 's3'. Got {self.url}"
  self.file = None
  else:
  for _fname in os.listdir(working_dir_or_config):
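The effect of the new scheme handling in isolation, with a hypothetical URL:

# Mirrors the check added above: a bare host/path is promoted to https before validation.
url = "storage.example.com/builds/my-model.clarifai"
if len(url.split(":")) == 1:
  url = f"https://{url}"
assert url.startswith("http") or url.startswith("s3")  # now passes as an https URL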
{clarifai-10.5.0 → clarifai-10.5.1}/clarifai/models/model_serving/constants.py
@@ -1,20 +1,13 @@
  import os

+ from clarifai.utils.constants import CLARIFAI_HOME
+
  MAX_HW_DIM = 1024
  IMAGE_TENSOR_NAME = "image"
  TEXT_TENSOR_NAME = "text"

  BUILT_MODEL_EXT = ".clarifai"

- default_home = os.path.join(os.path.expanduser("~"), ".cache")
- CLARIFAI_HOME = os.path.expanduser(
- os.getenv(
- "CLARIFAI_HOME",
- os.path.join(os.getenv("XDG_CACHE_HOME", default_home), "clarifai"),
- ))
- os.makedirs(CLARIFAI_HOME, exist_ok=True)
- CLARIFAI_PAT_PATH = os.path.join(CLARIFAI_HOME, "pat")
-
  CLARIFAI_EXAMPLES_REPO = "https://github.com/Clarifai/examples.git"
  repo_name = CLARIFAI_EXAMPLES_REPO.split("/")[-1].replace(".git", "")
  CLARIFAI_EXAMPLES_REPO_PATH = os.path.join(CLARIFAI_HOME, repo_name)
clarifai-10.5.1/clarifai/models/model_serving/utils.py
@@ -0,0 +1,30 @@
+ import os
+
+ from clarifai.utils.constants import CLARIFAI_PAT_ENV_VAR, CLARIFAI_PAT_PATH
+
+
+ def _persist_pat(pat: str):
+ """ Write down pat to CLARIFAI_PAT_PATH """
+ with open(CLARIFAI_PAT_PATH, "w") as f:
+ f.write(pat)
+
+
+ def _read_pat():
+ if not os.path.exists(CLARIFAI_PAT_PATH) and not os.environ.get(CLARIFAI_PAT_ENV_VAR, ""):
+ return None
+ if os.path.exists(CLARIFAI_PAT_PATH):
+ with open(CLARIFAI_PAT_PATH, "r") as f:
+ return f.read().replace("\n", "").replace("\r", "").strip()
+ elif os.environ.get(CLARIFAI_PAT_ENV_VAR):
+ return os.environ.get(CLARIFAI_PAT_ENV_VAR)
+ else:
+ raise ValueError(
+ f"PAT not found, please run `clarifai login` to persist your PAT or set it as an environment variable under the name '{CLARIFAI_PAT_ENV_VAR}'"
+ )
+
+
+ def login(pat=None):
+ """ if pat provided, set pat to CLARIFAI_PAT otherwise read pat from file"""
+ pat = pat or _read_pat()
+ assert pat, Exception("PAT is not found, please run `clarifai login` to persist your PAT")
+ os.environ["CLARIFAI_PAT"] = pat
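login() can now resolve the PAT either from the file persisted by `clarifai login` or from the CLARIFAI_PAT environment variable. A minimal sketch (the PAT value is illustrative):

import os
from clarifai.models.model_serving.utils import login

os.environ["CLARIFAI_PAT"] = "my-pat"  # illustrative; a PAT persisted via `clarifai login` also works
login()  # exports the resolved PAT to CLARIFAI_PAT for the rest of the process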
clarifai-10.5.1/clarifai/utils/constants.py
@@ -0,0 +1,12 @@
+ import os
+
+ USER_CACHE_DIR = os.path.join(os.path.expanduser("~"), ".cache")
+ CLARIFAI_HOME = os.path.expanduser(
+ os.getenv(
+ "CLARIFAI_HOME",
+ os.path.join(os.getenv("XDG_CACHE_HOME", USER_CACHE_DIR), "clarifai"),
+ ))
+ os.makedirs(CLARIFAI_HOME, exist_ok=True)
+ CLARIFAI_PAT_PATH = os.path.join(CLARIFAI_HOME, "pat")
+ CLARIFAI_PAT_ENV_VAR = "CLARIFAI_PAT"
+ CLARIFAI_SESSION_TOKEN_ENV_VAR = "CLARIFAI_SESSION_TOKEN"
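Because CLARIFAI_HOME is resolved once at import time (and the directory is created immediately), an override has to be in place before the first import. A hedged sketch with an illustrative directory:

import os

os.environ["CLARIFAI_HOME"] = "/tmp/clarifai-cache"  # must be set before the first import

from clarifai.utils.constants import CLARIFAI_HOME, CLARIFAI_PAT_PATH
print(CLARIFAI_HOME)      # /tmp/clarifai-cache
print(CLARIFAI_PAT_PATH)  # /tmp/clarifai-cache/pat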
{clarifai-10.5.0 → clarifai-10.5.1}/clarifai/versions.py
@@ -1,6 +1,6 @@
  import os

- CLIENT_VERSION = "10.5.0"
+ CLIENT_VERSION = "10.5.1"
  OS_VER = os.sys.platform
  PYTHON_VERSION = '.'.join(
  map(str, [os.sys.version_info.major, os.sys.version_info.minor, os.sys.version_info.micro]))
{clarifai-10.5.0 → clarifai-10.5.1/clarifai.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: clarifai
- Version: 10.5.0
+ Version: 10.5.1
  Summary: Clarifai Python SDK
  Home-page: https://github.com/Clarifai/clarifai-python
  Author: Clarifai
{clarifai-10.5.0 → clarifai-10.5.1}/clarifai.egg-info/SOURCES.txt
@@ -108,6 +108,7 @@ clarifai/rag/utils.py
  clarifai/schema/search.py
  clarifai/urls/helper.py
  clarifai/utils/__init__.py
+ clarifai/utils/constants.py
  clarifai/utils/logging.py
  clarifai/utils/misc.py
  clarifai/utils/model_train.py
{clarifai-10.5.0 → clarifai-10.5.1}/tests/test_auth.py
@@ -79,14 +79,6 @@ def test_exception_empty_user():
  ClarifaiAuthHelper("", "main", "fake_pat")


- def test_exception_empty_app():
- ClarifaiAuthHelper("clarifai", "", "fake_pat", validate=False)
- with pytest.raises(
- Exception,
- match="Need 'app_id' to not be empty in the query params or user CLARIFAI_APP_ID env var"):
- ClarifaiAuthHelper("clarifai", "", "fake_pat")
-
-
  def test_exception_empty_pat():
  ClarifaiAuthHelper("clarifai", "main", "", validate=False)
  with pytest.raises(
{clarifai-10.5.0 → clarifai-10.5.1}/tests/test_search.py
@@ -1,6 +1,6 @@
  import os
- import time
  import typing
+ import uuid

  import pytest
  from google.protobuf import struct_pb2
@@ -10,8 +10,8 @@ from clarifai.client.user import User
  from clarifai.errors import UserError

  CREATE_APP_USER_ID = os.environ["CLARIFAI_USER_ID"]
- now = int(time.time())
- CREATE_APP_ID = f"ci_search_app_{now}"
+ uniq = uuid.uuid4().hex[:10]
+ CREATE_APP_ID = f"ci_search_app_{uniq}"
  CREATE_DATASET_ID = "ci_search_dataset"
  DOG_IMG_URL = "https://samples.clarifai.com/dog.tiff"
  DATASET_IMAGES_DIR = os.path.dirname(__file__) + "/assets/voc/images"
clarifai-10.5.0/VERSION DELETED
@@ -1 +0,0 @@
- 10.5.0
clarifai-10.5.0/clarifai/models/model_serving/utils.py
@@ -1,23 +0,0 @@
- import os
-
- from .constants import CLARIFAI_PAT_PATH
-
-
- def _persist_pat(pat: str):
- """ Write down pat to CLARIFAI_PAT_PATH """
- with open(CLARIFAI_PAT_PATH, "w") as f:
- f.write(pat)
-
-
- def _read_pat():
- if not os.path.exists(CLARIFAI_PAT_PATH):
- return None
- with open(CLARIFAI_PAT_PATH, "r") as f:
- return f.read().replace("\n", "").replace("\r", "").strip()
-
-
- def login(pat=None):
- """ if pat provided, set pat to CLARIFAI_PAT otherwise read pat from file"""
- pat = pat or _read_pat()
- assert pat, Exception("PAT is not found, please run `clarifai login` to persist your PAT")
- os.environ["CLARIFAI_PAT"] = pat