clarifai 11.8.4__tar.gz → 11.9.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (156)
  1. {clarifai-11.8.4/clarifai.egg-info → clarifai-11.9.0}/PKG-INFO +3 -3
  2. clarifai-11.9.0/clarifai/__init__.py +1 -0
  3. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/cli/model.py +13 -6
  4. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/client/__init__.py +6 -0
  5. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/client/model_client.py +58 -19
  6. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/models/model_builder.py +329 -181
  7. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/utils/method_signatures.py +9 -0
  8. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/utils/constants.py +1 -0
  9. clarifai-11.9.0/clarifai/versions.py +32 -0
  10. {clarifai-11.8.4 → clarifai-11.9.0/clarifai.egg-info}/PKG-INFO +3 -3
  11. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai.egg-info/SOURCES.txt +2 -1
  12. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai.egg-info/requires.txt +2 -2
  13. {clarifai-11.8.4 → clarifai-11.9.0}/requirements.txt +2 -2
  14. clarifai-11.9.0/tests/test_with_proto_feature.py +222 -0
  15. clarifai-11.8.4/clarifai/__init__.py +0 -1
  16. clarifai-11.8.4/clarifai/versions.py +0 -9
  17. {clarifai-11.8.4 → clarifai-11.9.0}/LICENSE +0 -0
  18. {clarifai-11.8.4 → clarifai-11.9.0}/MANIFEST.in +0 -0
  19. {clarifai-11.8.4 → clarifai-11.9.0}/README.md +0 -0
  20. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/cli/README.md +0 -0
  21. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/cli/__init__.py +0 -0
  22. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/cli/__main__.py +0 -0
  23. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/cli/base.py +0 -0
  24. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/cli/compute_cluster.py +0 -0
  25. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/cli/deployment.py +0 -0
  26. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/cli/nodepool.py +0 -0
  27. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/cli/pipeline.py +0 -0
  28. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/cli/pipeline_step.py +0 -0
  29. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/cli/templates/__init__.py +0 -0
  30. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/cli/templates/model_templates.py +0 -0
  31. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/cli/templates/pipeline_step_templates.py +0 -0
  32. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/cli/templates/pipeline_templates.py +0 -0
  33. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/cli.py +0 -0
  34. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/client/app.py +0 -0
  35. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/client/auth/__init__.py +0 -0
  36. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/client/auth/helper.py +0 -0
  37. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/client/auth/register.py +0 -0
  38. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/client/auth/stub.py +0 -0
  39. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/client/base.py +0 -0
  40. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/client/compute_cluster.py +0 -0
  41. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/client/dataset.py +0 -0
  42. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/client/deployment.py +0 -0
  43. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/client/input.py +0 -0
  44. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/client/lister.py +0 -0
  45. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/client/model.py +0 -0
  46. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/client/module.py +0 -0
  47. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/client/nodepool.py +0 -0
  48. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/client/pipeline.py +0 -0
  49. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/client/pipeline_step.py +0 -0
  50. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/client/runner.py +0 -0
  51. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/client/search.py +0 -0
  52. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/client/user.py +0 -0
  53. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/client/workflow.py +0 -0
  54. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/constants/base.py +0 -0
  55. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/constants/dataset.py +0 -0
  56. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/constants/input.py +0 -0
  57. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/constants/model.py +0 -0
  58. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/constants/rag.py +0 -0
  59. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/constants/search.py +0 -0
  60. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/constants/workflow.py +0 -0
  61. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/datasets/__init__.py +0 -0
  62. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/datasets/export/__init__.py +0 -0
  63. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/datasets/export/inputs_annotations.py +0 -0
  64. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/datasets/upload/__init__.py +0 -0
  65. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/datasets/upload/base.py +0 -0
  66. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/datasets/upload/features.py +0 -0
  67. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/datasets/upload/image.py +0 -0
  68. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/datasets/upload/loaders/README.md +0 -0
  69. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/datasets/upload/loaders/__init__.py +0 -0
  70. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/datasets/upload/loaders/coco_captions.py +0 -0
  71. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/datasets/upload/loaders/coco_detection.py +0 -0
  72. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/datasets/upload/loaders/imagenet_classification.py +0 -0
  73. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/datasets/upload/loaders/xview_detection.py +0 -0
  74. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/datasets/upload/multimodal.py +0 -0
  75. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/datasets/upload/text.py +0 -0
  76. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/datasets/upload/utils.py +0 -0
  77. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/errors.py +0 -0
  78. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/models/__init__.py +0 -0
  79. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/models/api.py +0 -0
  80. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/modules/README.md +0 -0
  81. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/modules/__init__.py +0 -0
  82. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/modules/css.py +0 -0
  83. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/modules/pages.py +0 -0
  84. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/modules/style.css +0 -0
  85. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/rag/__init__.py +0 -0
  86. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/rag/rag.py +0 -0
  87. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/rag/utils.py +0 -0
  88. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/__init__.py +0 -0
  89. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/dockerfile_template/Dockerfile.template +0 -0
  90. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/models/__init__.py +0 -0
  91. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/models/dummy_openai_model.py +0 -0
  92. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/models/mcp_class.py +0 -0
  93. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/models/model_class.py +0 -0
  94. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/models/model_run_locally.py +0 -0
  95. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/models/model_runner.py +0 -0
  96. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/models/model_servicer.py +0 -0
  97. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/models/openai_class.py +0 -0
  98. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/models/visual_classifier_class.py +0 -0
  99. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/models/visual_detector_class.py +0 -0
  100. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/pipeline_steps/__init__.py +0 -0
  101. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/pipeline_steps/pipeline_step_builder.py +0 -0
  102. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/pipelines/__init__.py +0 -0
  103. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/pipelines/pipeline_builder.py +0 -0
  104. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/server.py +0 -0
  105. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/utils/__init__.py +0 -0
  106. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/utils/code_script.py +0 -0
  107. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/utils/const.py +0 -0
  108. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/utils/data_types/__init__.py +0 -0
  109. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/utils/data_types/data_types.py +0 -0
  110. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/utils/data_utils.py +0 -0
  111. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/utils/loader.py +0 -0
  112. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/utils/model_utils.py +0 -0
  113. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/utils/openai_convertor.py +0 -0
  114. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/utils/pipeline_validation.py +0 -0
  115. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/utils/serializers.py +0 -0
  116. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/runners/utils/url_fetcher.py +0 -0
  117. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/schema/search.py +0 -0
  118. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/urls/helper.py +0 -0
  119. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/utils/__init__.py +0 -0
  120. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/utils/cli.py +0 -0
  121. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/utils/config.py +0 -0
  122. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/utils/evaluation/__init__.py +0 -0
  123. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/utils/evaluation/helpers.py +0 -0
  124. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/utils/evaluation/main.py +0 -0
  125. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/utils/evaluation/testset_annotation_parser.py +0 -0
  126. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/utils/hashing.py +0 -0
  127. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/utils/logging.py +0 -0
  128. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/utils/misc.py +0 -0
  129. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/utils/model_train.py +0 -0
  130. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/utils/protobuf.py +0 -0
  131. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/utils/secrets.py +0 -0
  132. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/workflows/__init__.py +0 -0
  133. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/workflows/export.py +0 -0
  134. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/workflows/utils.py +0 -0
  135. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai/workflows/validate.py +0 -0
  136. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai.egg-info/dependency_links.txt +0 -0
  137. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai.egg-info/entry_points.txt +0 -0
  138. {clarifai-11.8.4 → clarifai-11.9.0}/clarifai.egg-info/top_level.txt +0 -0
  139. {clarifai-11.8.4 → clarifai-11.9.0}/pyproject.toml +0 -0
  140. {clarifai-11.8.4 → clarifai-11.9.0}/setup.cfg +0 -0
  141. {clarifai-11.8.4 → clarifai-11.9.0}/setup.py +0 -0
  142. {clarifai-11.8.4 → clarifai-11.9.0}/tests/test_app.py +0 -0
  143. {clarifai-11.8.4 → clarifai-11.9.0}/tests/test_async_stub.py +0 -0
  144. {clarifai-11.8.4 → clarifai-11.9.0}/tests/test_auth.py +0 -0
  145. {clarifai-11.8.4 → clarifai-11.9.0}/tests/test_data_upload.py +0 -0
  146. {clarifai-11.8.4 → clarifai-11.9.0}/tests/test_eval.py +0 -0
  147. {clarifai-11.8.4 → clarifai-11.9.0}/tests/test_list_models.py +0 -0
  148. {clarifai-11.8.4 → clarifai-11.9.0}/tests/test_misc.py +0 -0
  149. {clarifai-11.8.4 → clarifai-11.9.0}/tests/test_model_predict.py +0 -0
  150. {clarifai-11.8.4 → clarifai-11.9.0}/tests/test_model_train.py +0 -0
  151. {clarifai-11.8.4 → clarifai-11.9.0}/tests/test_modules.py +0 -0
  152. {clarifai-11.8.4 → clarifai-11.9.0}/tests/test_pipeline_client.py +0 -0
  153. {clarifai-11.8.4 → clarifai-11.9.0}/tests/test_rag.py +0 -0
  154. {clarifai-11.8.4 → clarifai-11.9.0}/tests/test_search.py +0 -0
  155. {clarifai-11.8.4 → clarifai-11.9.0}/tests/test_secrets.py +0 -0
  156. {clarifai-11.8.4 → clarifai-11.9.0}/tests/test_stub.py +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: clarifai
- Version: 11.8.4
+ Version: 11.9.0
  Home-page: https://github.com/Clarifai/clarifai-python
  Author: Clarifai
  Author-email: support@clarifai.com
@@ -19,8 +19,8 @@ Classifier: Operating System :: OS Independent
  Requires-Python: >=3.8
  Description-Content-Type: text/markdown
  License-File: LICENSE
- Requires-Dist: clarifai-grpc>=11.8.2
- Requires-Dist: clarifai-protocol>=0.0.32
+ Requires-Dist: clarifai-grpc>=11.9.8
+ Requires-Dist: clarifai-protocol>=0.0.33
  Requires-Dist: numpy>=1.22.0
  Requires-Dist: tqdm>=4.65.0
  Requires-Dist: PyYAML>=6.0.1
clarifai-11.9.0/clarifai/__init__.py

@@ -0,0 +1 @@
+ __version__ = "11.9.0"
{clarifai-11.8.4 → clarifai-11.9.0}/clarifai/cli/model.py

@@ -29,6 +29,7 @@ from clarifai.utils.constants import (
  DEFAULT_LOCAL_RUNNER_NODEPOOL_ID,
  DEFAULT_OLLAMA_MODEL_REPO_BRANCH,
  DEFAULT_PYTHON_MODEL_REPO_BRANCH,
+ DEFAULT_SGLANG_MODEL_REPO_BRANCH,
  DEFAULT_TOOLKIT_MODEL_REPO,
  DEFAULT_VLLM_MODEL_REPO_BRANCH,
  )
@@ -76,15 +77,15 @@ def model():
  @click.option(
  '--toolkit',
  type=click.Choice(
- ['ollama', 'huggingface', 'lmstudio', 'vllm', 'python'], case_sensitive=False
+ ['ollama', 'huggingface', 'lmstudio', 'vllm', 'sglang', 'python'], case_sensitive=False
  ),
  required=False,
- help='Toolkit to use for model initialization. Currently supports "ollama", "huggingface", "lmstudio", "vllm" and "python".',
+ help='Toolkit to use for model initialization. Currently supports "ollama", "huggingface", "lmstudio", "vllm", "sglang" and "python".',
  )
  @click.option(
  '--model-name',
  required=False,
- help='Model name to configure when using --toolkit. For ollama toolkit, this sets the Ollama model to use (e.g., "llama3.1", "mistral", etc.). For vllm & huggingface toolkit, this sets the Hugging Face model repo_id (e.g., "unsloth/Llama-3.2-1B-Instruct").\n For lmstudio toolkit, this sets the LM Studio model name (e.g., "qwen/qwen3-4b-thinking-2507").\n',
+ help='Model name to configure when using --toolkit. For ollama toolkit, this sets the Ollama model to use (e.g., "llama3.1", "mistral", etc.). For vllm, sglang & huggingface toolkit, this sets the Hugging Face model repo_id (e.g., "unsloth/Llama-3.2-1B-Instruct").\n For lmstudio toolkit, this sets the LM Studio model name (e.g., "qwen/qwen3-4b-thinking-2507").\n',
  )
  @click.option(
  '--port',
@@ -129,8 +130,8 @@ def init(
  MODEL_TYPE_ID: Type of model to create. If not specified, defaults to "text-to-text" for text models.\n
  GITHUB_PAT: GitHub Personal Access Token for authentication when cloning private repositories.\n
  GITHUB_URL: GitHub repository URL or "repo" format to clone a repository from. If provided, the entire repository contents will be copied to the target directory instead of using default templates.\n
- TOOLKIT: Toolkit to use for model initialization. Currently supports "ollama", "huggingface", "lmstudio", "vllm" and "python".\n
- MODEL_NAME: Model name to configure when using --toolkit. For ollama toolkit, this sets the Ollama model to use (e.g., "llama3.1", "mistral", etc.). For vllm & huggingface toolkit, this sets the Hugging Face model repo_id (e.g., "Qwen/Qwen3-4B-Instruct-2507"). For lmstudio toolkit, this sets the LM Studio model name (e.g., "qwen/qwen3-4b-thinking-2507").\n
+ TOOLKIT: Toolkit to use for model initialization. Currently supports "ollama", "huggingface", "lmstudio", "vllm", "sglang" and "python".\n
+ MODEL_NAME: Model name to configure when using --toolkit. For ollama toolkit, this sets the Ollama model to use (e.g., "llama3.1", "mistral", etc.). For vllm, sglang & huggingface toolkit, this sets the Hugging Face model repo_id (e.g., "Qwen/Qwen3-4B-Instruct-2507"). For lmstudio toolkit, this sets the LM Studio model name (e.g., "qwen/qwen3-4b-thinking-2507").\n
  PORT: Port to run the (Ollama/lmstudio) server on. Defaults to 23333.\n
  CONTEXT_LENGTH: Context length for the (Ollama/lmstudio) model. Defaults to 8192.\n
  """
@@ -183,6 +184,9 @@ def init(
  elif toolkit == 'vllm':
  github_url = DEFAULT_TOOLKIT_MODEL_REPO
  branch = DEFAULT_VLLM_MODEL_REPO_BRANCH
+ elif toolkit == 'sglang':
+ github_url = DEFAULT_TOOLKIT_MODEL_REPO
+ branch = DEFAULT_SGLANG_MODEL_REPO_BRANCH
  elif toolkit == 'python':
  github_url = DEFAULT_TOOLKIT_MODEL_REPO
  branch = DEFAULT_PYTHON_MODEL_REPO_BRANCH
@@ -320,7 +324,9 @@ def init(
  if (user_id or model_name or port or context_length) and (toolkit == 'lmstudio'):
  customize_lmstudio_model(model_path, user_id, model_name, port, context_length)

- if (user_id or model_name) and (toolkit == 'huggingface' or toolkit == 'vllm'):
+ if (user_id or model_name) and (
+ toolkit == 'huggingface' or toolkit == 'vllm' or toolkit == 'sglang'
+ ):
  # Update the config.yaml file with the provided model name
  customize_huggingface_model(model_path, user_id, model_name)

@@ -1010,6 +1016,7 @@ def local_runner(ctx, model_path, pool_size, verbose):
  logger.info("Customizing Ollama model with provided parameters...")
  customize_ollama_model(
  model_path=model_path,
+ user_id=user_id,
  verbose=True if verbose else False,
  )
  except Exception as e:
{clarifai-11.8.4 → clarifai-11.9.0}/clarifai/client/__init__.py

@@ -2,11 +2,14 @@ from clarifai.client.app import App
  from clarifai.client.auth.register import V2Stub
  from clarifai.client.auth.stub import create_stub
  from clarifai.client.base import BaseClient
+ from clarifai.client.compute_cluster import ComputeCluster
  from clarifai.client.dataset import Dataset
+ from clarifai.client.deployment import Deployment
  from clarifai.client.input import Inputs
  from clarifai.client.lister import Lister
  from clarifai.client.model import Model
  from clarifai.client.module import Module
+ from clarifai.client.nodepool import Nodepool
  from clarifai.client.pipeline import Pipeline
  from clarifai.client.pipeline_step import PipelineStep
  from clarifai.client.search import Search
@@ -28,4 +31,7 @@ __all__ = [
  'Inputs',
  'BaseClient',
  'Search',
+ 'ComputeCluster',
+ 'Nodepool',
+ 'Deployment',
  ]
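
With this change, ComputeCluster, Nodepool, and Deployment are re-exported from the top-level clarifai.client package. A minimal sketch of the new import surface; the commented constructor arguments are illustrative and not taken from this diff:

    # These imports now resolve directly from clarifai.client (per the updated __init__.py).
    from clarifai.client import ComputeCluster, Deployment, Nodepool

    # Hypothetical usage; exact constructor arguments are not shown in this diff.
    # cluster = ComputeCluster(compute_cluster_id="my-cluster", user_id="me")
    # nodepool = Nodepool(nodepool_id="my-nodepool", user_id="me")
    # deployment = Deployment(deployment_id="my-deployment", user_id="me")
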
{clarifai-11.8.4 → clarifai-11.9.0}/clarifai/client/model_client.py

@@ -11,6 +11,7 @@ from clarifai.constants.model import MAX_MODEL_PREDICT_INPUTS
  from clarifai.errors import UserError
  from clarifai.runners.utils import code_script, method_signatures
  from clarifai.runners.utils.method_signatures import (
+ RESERVED_PARAM_WITH_PROTO,
  CompatibilitySerializer,
  deserialize,
  get_stream_from_signature,
@@ -204,6 +205,9 @@ class ModelClient:

  def bind_f(method_name, method_argnames, call_func, async_call_func):
  def sync_f(*args, **kwargs):
+ # Extract with_proto parameter if present
+ with_proto = kwargs.pop(RESERVED_PARAM_WITH_PROTO, False)
+
  if len(args) > len(method_argnames):
  raise TypeError(
  f"{method_name}() takes {len(method_argnames)} positional arguments but {len(args)} were given"
@@ -221,7 +225,7 @@ class ModelClient:
  )
  if is_batch_input_valid and (not is_openai_chat_format(batch_inputs)):
  # If the batch input is valid, call the function with the batch inputs and the method name
- return call_func(batch_inputs, method_name)
+ return call_func(batch_inputs, method_name, with_proto=with_proto)

  for name, arg in zip(
  method_argnames, args
@@ -229,10 +233,13 @@ class ModelClient:
  if name in kwargs:
  raise TypeError(f"Multiple values for argument {name}")
  kwargs[name] = arg
- return call_func(kwargs, method_name)
+ return call_func(kwargs, method_name, with_proto=with_proto)

  async def async_f(*args, **kwargs):
  # Async version to call the async function
+ # Extract with_proto parameter if present
+ with_proto = kwargs.pop(RESERVED_PARAM_WITH_PROTO, False)
+
  if len(args) > len(method_argnames):
  raise TypeError(
  f"{method_name}() takes {len(method_argnames)} positional arguments but {len(args)} were given"
@@ -249,7 +256,9 @@ class ModelClient:
  )
  if is_batch_input_valid and (not is_openai_chat_format(batch_inputs)):
  # If the batch input is valid, call the function with the batch inputs and the method name
- return async_call_func(batch_inputs, method_name)
+ return async_call_func(
+ batch_inputs, method_name, with_proto=with_proto
+ )

  for name, arg in zip(
  method_argnames, args
@@ -258,7 +267,7 @@ class ModelClient:
  raise TypeError(f"Multiple values for argument {name}")
  kwargs[name] = arg

- return async_call_func(kwargs, method_name)
+ return async_call_func(kwargs, method_name, with_proto=with_proto)

  class MethodWrapper:
  def __call__(self, *args, **kwargs):
@@ -364,6 +373,7 @@ class ModelClient:
  self,
  inputs, # TODO set up functions according to fetched signatures?
  method_name: str = 'predict',
+ with_proto: bool = False,
  ) -> Any:
  input_signature = self._method_signatures[method_name].input_fields
  output_signature = self._method_signatures[method_name].output_fields
@@ -385,9 +395,12 @@ class ModelClient:
  outputs = []
  for output in response.outputs:
  outputs.append(deserialize(output.data, output_signature, is_output=True))
- if batch_input:
- return outputs
- return outputs[0]
+
+ result = outputs if batch_input else outputs[0]
+
+ if with_proto:
+ return result, response
+ return result

  def _predict_by_proto(
  self,
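
Taken together, these hunks add a reserved with_proto keyword to every bound client method: it is popped from kwargs before argument validation and, when True, the deserialized result is returned together with the raw protobuf response. A minimal sketch of the sync predict path, assuming RESERVED_PARAM_WITH_PROTO resolves to the string "with_proto" (consistent with the new tests/test_with_proto_feature.py) and a model whose predict method takes a prompt argument:

    from clarifai.client import Model

    # Illustrative model URL and method signature; adjust to a real model.
    model = Model(url="https://clarifai.com/user_id/app_id/models/model_id")

    # Default behaviour: only the deserialized result is returned.
    text = model.predict(prompt="Hello")

    # With with_proto=True the raw protobuf response is returned as well,
    # so callers can inspect status codes or per-output metadata.
    text, response_proto = model.predict(prompt="Hello", with_proto=True)
    print(response_proto.status.code)
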
@@ -448,15 +461,17 @@ class ModelClient:
  self,
  inputs,
  method_name: str = 'predict',
+ with_proto: bool = False,
  ) -> Any:
  """Asynchronously process inputs and make predictions.

  Args:
  inputs: Input data to process
  method_name (str): Name of the method to call
+ with_proto (bool): If True, return both the processed result and the raw protobuf response

  Returns:
- Processed prediction results
+ Processed prediction results, optionally with protobuf response
  """
  # method_name is set to 'predict' by default, this is because to replicate the input and output signature behaviour of sync to async predict.
  input_signature = self._method_signatures[method_name].input_fields
@@ -477,7 +492,11 @@ class ModelClient:
  for output in response.outputs:
  outputs.append(deserialize(output.data, output_signature, is_output=True))

- return outputs if batch_input else outputs[0]
+ result = outputs if batch_input else outputs[0]
+
+ if with_proto:
+ return result, response
+ return result

  async def _async_predict_by_proto(
  self,
@@ -551,6 +570,7 @@ class ModelClient:
  self,
  inputs, # TODO set up functions according to fetched signatures?
  method_name: str = 'generate',
+ with_proto: bool = False,
  ) -> Any:
  input_signature = self._method_signatures[method_name].input_fields
  output_signature = self._method_signatures[method_name].output_fields
@@ -572,10 +592,13 @@ class ModelClient:
  outputs = []
  for output in response.outputs:
  outputs.append(deserialize(output.data, output_signature, is_output=True))
- if batch_input:
- yield outputs
+
+ result = outputs if batch_input else outputs[0]
+
+ if with_proto:
+ yield result, response
  else:
- yield outputs[0]
+ yield result

  def _generate_by_proto(
  self,
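
The generate path applies the same flag per yielded chunk: each iteration produces either the deserialized chunk or a (chunk, response) tuple. A short sketch under the same assumptions as above:

    # Streaming generation; each iteration yields (chunk, proto) when with_proto=True.
    for chunk, response_proto in model.generate(prompt="Tell me a story", with_proto=True):
        print(chunk, response_proto.status.code)
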
@@ -641,6 +664,7 @@ class ModelClient:
  self,
  inputs,
  method_name: str = 'generate',
+ with_proto: bool = False,
  ) -> Any:
  # method_name is set to 'generate' by default, this is because to replicate the input and output signature behaviour of sync to async generate.
  input_signature = self._method_signatures[method_name].input_fields
@@ -654,18 +678,21 @@ class ModelClient:
  proto_inputs = []
  for input in inputs:
  proto = resources_pb2.Input()
- serialize(input, input_signature, proto.data)
- proto_inputs.append(proto)
+ serialize(input, input_signature, proto.data)
+ proto_inputs.append(proto)
  response_stream = self._async_generate_by_proto(proto_inputs, method_name)

  async for response in response_stream:
  outputs = []
  for output in response.outputs:
  outputs.append(deserialize(output.data, output_signature, is_output=True))
- if batch_input:
- yield outputs
+
+ result = outputs if batch_input else outputs[0]
+
+ if with_proto:
+ yield result, response
  else:
- yield outputs[0]
+ yield result

  async def _async_generate_by_proto(
  self,
@@ -734,6 +761,7 @@ class ModelClient:
  self,
  inputs,
  method_name: str = 'stream',
+ with_proto: bool = False,
  ) -> Any:
  input_signature = self._method_signatures[method_name].input_fields
  output_signature = self._method_signatures[method_name].output_fields
@@ -775,7 +803,12 @@ class ModelClient:

  for response in response_stream:
  assert len(response.outputs) == 1, 'streaming methods must have exactly one output'
- yield deserialize(response.outputs[0].data, output_signature, is_output=True)
+ result = deserialize(response.outputs[0].data, output_signature, is_output=True)
+
+ if with_proto:
+ yield result, response
+ else:
+ yield result

  def _req_iterator(
  self,
@@ -843,6 +876,7 @@ class ModelClient:
  self,
  inputs,
  method_name: str = 'stream',
+ with_proto: bool = False,
  ) -> Any:
  # method_name is set to 'stream' by default, this is because to replicate the input and output signature behaviour of sync to async stream.
  input_signature = self._method_signatures[method_name].input_fields
@@ -885,7 +919,12 @@ class ModelClient:

  async for response in response_stream:
  assert len(response.outputs) == 1, 'streaming methods must have exactly one output'
- yield deserialize(response.outputs[0].data, output_signature, is_output=True)
+ result = deserialize(response.outputs[0].data, output_signature, is_output=True)
+
+ if with_proto:
+ yield result, response
+ else:
+ yield result

  async def _async_stream_by_proto(
  self,
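
Across the predict, generate, and stream paths (sync and async alike) the second tuple element is the raw response proto for that call or chunk, and for batch inputs the first element is a list with one entry per input. A sketch of batch predict, assuming the client's usual list-of-dicts batch convention:

    # Assumed batch convention: a single list of keyword-argument dicts.
    batch = [{"prompt": "first"}, {"prompt": "second"}]
    results, response_proto = model.predict(batch, with_proto=True)

    # results has one entry per input; response_proto carries the full response,
    # including per-output status.
    for out, proto_out in zip(results, response_proto.outputs):
        print(out, proto_out.status.code)
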