clarifai-11.4.10-py3-none-any.whl → clarifai-11.5.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
clarifai/__init__.py CHANGED
@@ -1 +1 @@
- __version__ = "11.4.10"
+ __version__ = "11.5.1"
clarifai/client/app.py CHANGED
@@ -470,8 +470,8 @@ class App(Lister, BaseClient):
  model = self.model(
  model_id=node['model']['model_id'],
  model_version={"id": node['model'].get('model_version_id', "")},
- user_id=node['model'].get('user_id', ""),
- app_id=node['model'].get('app_id', ""),
+ user_id=node['model'].get('user_id', self.user_app_id.user_id),
+ app_id=node['model'].get('app_id', self.user_app_id.app_id),
  )
  except Exception as e:
  if "Model does not exist" in str(e):
clarifai/runners/__init__.py CHANGED
@@ -1,11 +1,11 @@
- from .models.model_builder import ModelBuilder
+ from .models.mcp_class import MCPModelClass
  from .models.model_class import ModelClass
  from .models.model_runner import ModelRunner
  from .models.openai_class import OpenAIModelClass

  __all__ = [
  "ModelRunner",
- "ModelBuilder",
  "ModelClass",
+ "MCPModelClass",
  "OpenAIModelClass",
  ]
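
Note: with this release, ModelBuilder is no longer re-exported from the clarifai.runners package, while MCPModelClass is. A minimal sketch of the import paths implied by this diff (ModelBuilder's own module is unchanged in location):

    # Imports implied by the new clarifai/runners/__init__.py
    from clarifai.runners import MCPModelClass, ModelClass, ModelRunner, OpenAIModelClass

    # ModelBuilder must now be imported from its module directly.
    from clarifai.runners.models.model_builder import ModelBuilder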
clarifai/runners/dockerfile_template/Dockerfile.template CHANGED
@@ -3,9 +3,13 @@ FROM --platform=$TARGETPLATFORM ${FINAL_IMAGE} as final

  COPY --link requirements.txt /home/nonroot/requirements.txt

+ ENV VIRTUAL_ENV=/venv
+ ENV PATH="/home/nonroot/.local/bin:$VIRTUAL_ENV/bin:$PATH"
+
+
  # Update clarifai package so we always have latest protocol to the API. Everything should land in /venv
- RUN ["pip", "install", "--no-cache-dir", "-r", "/home/nonroot/requirements.txt"]
- RUN ["pip", "show", "clarifai"]
+ RUN ["uv", "pip", "install", "--no-cache-dir", "-r", "/home/nonroot/requirements.txt"]
+ RUN ["uv", "pip", "show", "--no-cache-dir", "clarifai"]

  # Set the NUMBA cache dir to /tmp
  # Set the TORCHINDUCTOR cache dir to /tmp
clarifai/runners/models/dummy_openai_model.py CHANGED
@@ -13,9 +13,9 @@ class MockOpenAIClient:
  def create(self, **kwargs):
  """Mock create method for compatibility."""
  if kwargs.get("stream", False):
- return MockCompletionStream(kwargs.get("messages", []))
+ return MockCompletionStream(**kwargs)
  else:
- return MockCompletion(kwargs.get("messages", []))
+ return MockCompletion(**kwargs)

  def __init__(self):
  self.chat = self # Make self.chat point to self for compatibility
@@ -25,6 +25,19 @@ class MockOpenAIClient:
  class MockCompletion:
  """Mock completion object that mimics the OpenAI completion response structure."""

+ class Usage:
+ def __init__(self, prompt_tokens, completion_tokens, total_tokens):
+ self.total_tokens = total_tokens
+ self.prompt_tokens = prompt_tokens
+ self.completion_tokens = completion_tokens
+
+ def to_dict(self):
+ return dict(
+ total_tokens=self.total_tokens,
+ prompt_tokens=self.prompt_tokens,
+ completion_tokens=self.completion_tokens,
+ )
+
  class Choice:
  class Message:
  def __init__(self, content):
@@ -36,17 +49,21 @@ class MockCompletion:
  self.finish_reason = "stop"
  self.index = 0

- def __init__(self, messages):
+ def __init__(self, **kwargs):
  # Generate a simple response based on the last message
+ messages = kwargs.get("messages")
  last_message = messages[-1] if messages else {"content": ""}
  response_text = f"Echo: {last_message.get('content', '')}"

  self.choices = [self.Choice(response_text)]
- self.usage = {
- "prompt_tokens": len(str(messages)),
- "completion_tokens": len(response_text),
- "total_tokens": len(str(messages)) + len(response_text),
- }
+ self.usage = self.Usage(
+ **{
+ "prompt_tokens": len(str(messages)),
+ "completion_tokens": len(response_text),
+ "total_tokens": len(str(messages)) + len(response_text),
+ }
+ )
+
  self.id = "dummy-completion-id"
  self.created = 1234567890
  self.model = "dummy-model"
@@ -65,9 +82,12 @@ class MockCompletion:
  }
  for choice in self.choices
  ],
- "usage": self.usage,
+ "usage": self.usage.to_dict(),
  }

+ def model_dump(self):
+ return self.to_dict()
+

  class MockCompletionStream:
  """Mock completion stream that mimics the OpenAI streaming response structure."""
@@ -79,14 +99,27 @@ class MockCompletionStream:
  self.content = content
  self.role = "assistant" if content is None else None

+ class Usage:
+ def __init__(self, prompt_tokens, completion_tokens, total_tokens):
+ self.total_tokens = total_tokens
+ self.prompt_tokens = prompt_tokens
+ self.completion_tokens = completion_tokens
+
+ def to_dict(self):
+ return dict(
+ total_tokens=self.total_tokens,
+ prompt_tokens=self.prompt_tokens,
+ completion_tokens=self.completion_tokens,
+ )
+
  def __init__(self, content=None, include_usage=False):
  self.delta = self.Delta(content)
  self.finish_reason = None if content else "stop"
  self.index = 0
  self.usage = (
- {"prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15}
+ self.Usage(**{"prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15})
  if include_usage
- else None
+ else self.Usage(None, None, None)
  )

  def __init__(self, content=None, include_usage=False):
@@ -114,11 +147,16 @@ class MockCompletionStream:
  ],
  }
  if self.usage:
- result["usage"] = self.usage
+ result["usage"] = self.usage.to_dict()
  return result

- def __init__(self, messages):
+ def model_dump(self):
+ return self.to_dict()
+
+ def __init__(self, **kwargs):
  # Generate a simple response based on the last message
+ messages = kwargs.get("messages")
+
  last_message = messages[-1] if messages else {"content": ""}
  self.response_text = f"Echo: {last_message.get('content', '')}"
  # Create chunks that ensure the full text is included in the first chunk
@@ -127,7 +165,7 @@ class MockCompletionStream:
  "", # Final chunk is empty to indicate completion
  ]
  self.current_chunk = 0
- self.include_usage = False
+ self.include_usage = kwargs.get("stream_options", {}).get("include_usage")

  def __iter__(self):
  return self
@@ -150,18 +188,14 @@ class DummyOpenAIModel(OpenAIModelClass):
  def _process_request(self, **kwargs) -> Dict[str, Any]:
  """Process a request for non-streaming responses."""
  completion_args = self._create_completion_args(kwargs)
- return self.client.chat.completions.create(**completion_args).to_dict()
+ return self.client.chat.completions.create(**completion_args).model_dump()

  def _process_streaming_request(self, **kwargs) -> Iterator[Dict[str, Any]]:
  """Process a request for streaming responses."""
- completion_args = self._create_completion_args(kwargs, stream=True)
- completion_stream = self.client.chat.completions.create(**completion_args)
- completion_stream.include_usage = kwargs.get('stream_options', {}).get(
- 'include_usage', False
- )
+ completion_stream = self.client.chat.completions.create(**kwargs)

  for chunk in completion_stream:
- yield chunk.to_dict()
+ yield chunk.model_dump()

  # Override the method directly for testing
  @OpenAIModelClass.method
@@ -169,14 +203,13 @@ class DummyOpenAIModel(OpenAIModelClass):
  """Direct implementation for testing purposes."""
  try:
  request_data = json.loads(req)
- params = self._extract_request_params(request_data)
-
+ request_data = self._create_completion_args(request_data)
  # Validate messages
- if not params.get("messages"):
+ if not request_data.get("messages"):
  yield "Error: No messages provided"
  return

- for message in params["messages"]:
+ for message in request_data["messages"]:
  if (
  not isinstance(message, dict)
  or "role" not in message
@@ -185,7 +218,7 @@ class DummyOpenAIModel(OpenAIModelClass):
  yield "Error: Invalid message format"
  return

- for chunk in self._process_streaming_request(**params):
+ for chunk in self._process_streaming_request(**request_data):
  yield json.dumps(chunk)
  except Exception as e:
  yield f"Error: {str(e)}"
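
Note: the mock client now forwards all keyword arguments to the completion objects, so stream_options reaches MockCompletionStream. A short test sketch under that assumption (the message content is made up):

    # Sketch: exercising the updated mock from this module.
    client = MockOpenAIClient()
    stream = client.chat.completions.create(
        messages=[{"role": "user", "content": "hi"}],  # hypothetical message
        stream=True,
        stream_options={"include_usage": True},  # now honored via **kwargs
    )
    for chunk in stream:
        print(chunk.model_dump())  # usage is emitted as a dict via Usage.to_dict()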
clarifai/runners/models/mcp_class.py CHANGED
@@ -2,14 +2,13 @@

  import asyncio
  import json
- from typing import Any
-
- from fastmcp import Client, FastMCP # use fastmcp v2 not the built in mcp
- from mcp import types
- from mcp.shared.exceptions import McpError
+ from typing import TYPE_CHECKING, Any

  from clarifai.runners.models.model_class import ModelClass

+ if TYPE_CHECKING:
+ from fastmcp import FastMCP
+

  class MCPModelClass(ModelClass):
  """Base class for wrapping FastMCP servers as a model running in Clarfai. This handles
@@ -19,10 +18,17 @@ class MCPModelClass(ModelClass):
  """

  def load_model(self):
+ try:
+ from fastmcp import Client
+ except ImportError:
+ raise ImportError(
+ "fastmcp package is required to use MCP functionality. "
+ "Install it with: pip install fastmcp"
+ )
  # in memory transport provided in fastmcp v2 so we can easily use the client functions.
  self.client = Client(self.get_server())

- def get_server(self) -> FastMCP:
+ def get_server(self) -> 'FastMCP':
  """Required method for each subclass to implement to return the FastMCP server to use."""
  raise NotImplementedError("Subclasses must implement get_server() method")

@@ -32,6 +38,8 @@
  return it's response.

  """
+ from mcp import types
+ from mcp.shared.exceptions import McpError

  async def send_notification(client_message: types.ClientNotification) -> None:
  async with self.client:
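
Note: fastmcp is now imported lazily inside load_model, so MCPModelClass can be imported without fastmcp installed. A minimal subclass sketch, assuming fastmcp v2 (pip install fastmcp) and its tool decorator:

    from fastmcp import FastMCP

    from clarifai.runners.models.mcp_class import MCPModelClass

    server = FastMCP("my-mcp-server")  # hypothetical server name

    @server.tool()
    def add(a: int, b: int) -> int:
        """Add two integers."""
        return a + b

    class MyMCPModel(MCPModelClass):
        def get_server(self) -> FastMCP:
            # Required override: return the FastMCP server to wrap.
            return server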
clarifai/runners/models/model_builder.py CHANGED
@@ -4,9 +4,11 @@ import inspect
  import os
  import re
  import shutil
+ import subprocess
  import sys
  import tarfile
  import time
+ import webbrowser
  from string import Template
  from unittest.mock import MagicMock

@@ -16,6 +18,7 @@ from clarifai_grpc.grpc.api.status import status_code_pb2
  from google.protobuf import json_format

  from clarifai.client.base import BaseClient
+ from clarifai.client.user import User
  from clarifai.runners.models.model_class import ModelClass
  from clarifai.runners.utils.const import (
  AMD_PYTHON_BASE_IMAGE,
@@ -61,7 +64,12 @@ def is_related(object_class, main_class):
  class ModelBuilder:
  DEFAULT_CHECKPOINT_SIZE = 50 * 1024**3 # 50 GiB

- def __init__(self, folder: str, validate_api_ids: bool = True, download_validation_only=False):
+ def __init__(
+ self,
+ folder: str,
+ validate_api_ids: bool = True,
+ download_validation_only: bool = False,
+ ):
  """
  :param folder: The folder containing the model.py, config.yaml, requirements.txt and
  checkpoints.
@@ -563,6 +571,42 @@ class ModelBuilder:
  dependencies_version[dependency] = version if version else None
  return dependencies_version

+ def _validate_requirements(self, python_version):
+ """here we use uv pip compile to validate the requirements.txt file
+ and ensure that the dependencies are compatible with each other prior to uploading
+ """
+ if not os.path.exists(os.path.join(self.folder, 'requirements.txt')):
+ raise FileNotFoundError(
+ "requirements.txt not found in the folder, please provide a valid requirements.txt file"
+ )
+ path = os.path.join(self.folder, 'requirements.txt')
+ # run the f"uv pip compile {path} --universal" command to validate the requirements.txt file
+ if not shutil.which('uv'):
+ raise Exception(
+ "uv command not found, please install uv to validate the requirements.txt file"
+ )
+ logger.info(f"Setup: Validating requirements.txt file at {path} using uv pip compile")
+ # Don't log the output of the comment unless it errors.
+ result = subprocess.run(
+ f"uv pip compile {path} --universal --python {python_version} --no-header --no-emit-index-url",
+ shell=True,
+ text=True,
+ capture_output=True,
+ check=False,
+ )
+ if result.returncode != 0:
+ logger.error(f"Error validating requirements.txt file: {result.stderr}")
+ logger.error(
+ "Failed to validate the requirements.txt file, please check the file for errors. Note this can happen if the machine you're upload from has different python version, accelerator, etc. from the desired machine you want to upload to."
+ )
+ logger.error("Output: " + result.stdout)
+ # If we have an error, raise an exception.
+ return False
+ else:
+ logger.info("Setup: Requirements.txt file validated successfully")
+ # If we have no error, we can just return.
+ return True
+
  def _is_amd(self):
  """
  Check if the model is AMD or not.
@@ -582,11 +626,50 @@
  "Both AMD and NVIDIA GPUs are specified in the config file, please use only one type of GPU."
  )
  if is_amd_gpu:
- logger.info("Using AMD base image to build the Docker image and upload the model")
+ logger.info(
+ "Setup: Using AMD base image to build the Docker image and upload the model"
+ )
  elif is_nvidia_gpu:
- logger.info("Using NVIDIA base image to build the Docker image and upload the model")
+ logger.info(
+ "Setup: Using NVIDIA base image to build the Docker image and upload the model"
+ )
  return is_amd_gpu

+ def _lint_python_code(self):
+ """
+ Lint the python code in the model.py file using flake8.
+ This will help catch any simple bugs in the code before uploading it to the API.
+ """
+ if not shutil.which('ruff'):
+ raise Exception("ruff command not found, please install ruff to lint the python code")
+ # List all the python files in the /1/ folder recursively and lint them.
+ python_files = []
+ for root, _, files in os.walk(os.path.join(self.folder, '1')):
+ for file in files:
+ if file.endswith('.py'):
+ python_files.append(os.path.join(root, file))
+ if not python_files:
+ logger.info("No Python files found to lint, skipping linting step.")
+ else:
+ logger.info(f"Setup: Linting Python files: {python_files}")
+ # Run ruff to lint the python code.
+ command = "ruff check --select=F"
+ result = subprocess.run(
+ f"{command} {' '.join(python_files)}",
+ shell=True,
+ text=True,
+ capture_output=True,
+ check=False,
+ )
+ if result.returncode != 0:
+ logger.error(f"Error linting Python code: {result.stderr}")
+ logger.error("Output: " + result.stdout)
+ logger.error(
+ f"Failed to lint the Python code, please check the code for errors using '{command}' so you don't have simple errors in your code prior to upload."
+ )
+ else:
+ logger.info("Setup: Python code linted successfully, no errors found.")
+
  def create_dockerfile(self):
  dockerfile_template = os.path.join(
  os.path.dirname(os.path.dirname(__file__)),
@@ -609,14 +692,21 @@
  )

  logger.info(
- f"Using Python version {python_version} from the config file to build the Dockerfile"
+ f"Setup: Using Python version {python_version} from the config file to build the Dockerfile"
  )
  else:
  logger.info(
- f"Python version not found in the config file, using default Python version: {DEFAULT_PYTHON_VERSION}"
+ f"Setup: Python version not found in the config file, using default Python version: {DEFAULT_PYTHON_VERSION}"
  )
  python_version = DEFAULT_PYTHON_VERSION

+ # Before we bother even picking the right base image, let's use uv to validate
+ # that the requirements.txt file is valid and compatible.
+ self._validate_requirements(python_version)
+
+ # Make sure any python code will not have simple bugs by linting it first.
+ self._lint_python_code()
+
  # Parse the requirements.txt file to determine the base image
  dependencies = self._parse_requirements()

@@ -637,7 +727,7 @@
  )
  if not torch_version:
  logger.info(
- f"torch version not found in requirements.txt, using the default version {DEFAULT_AMD_TORCH_VERSION}"
+ f"Setup: torch version not found in requirements.txt, using the default version {DEFAULT_AMD_TORCH_VERSION}"
  )
  torch_version = DEFAULT_AMD_TORCH_VERSION
  if torch_version not in [DEFAULT_AMD_TORCH_VERSION]:
@@ -651,7 +741,7 @@
  python_version=python_version,
  gpu_version=gpu_version,
  )
- logger.info("Using vLLM base image to build the Docker image")
+ logger.info("Setup: Using vLLM base image to build the Docker image")
  elif 'torch' in dependencies:
  torch_version = dependencies['torch']
  if python_version != DEFAULT_PYTHON_VERSION:
@@ -675,7 +765,7 @@
  gpu_version=gpu_version,
  )
  logger.info(
- f"Using Torch version {torch_version} base image to build the Docker image"
+ f"Setup: Using Torch version {torch_version} base image to build the Docker image"
  )
  else:
  final_image = PYTHON_BASE_IMAGE.format(python_version=python_version)
@@ -684,6 +774,8 @@
  torch_version = dependencies['torch']
  # Sort in reverse so that newer cuda versions come first and are preferred.
  for image in sorted(AVAILABLE_TORCH_IMAGES, reverse=True):
+ if image.find('rocm') >= 0:
+ continue # skip ROCm images as those are handled above.
  if torch_version in image and f'py{python_version}' in image:
  # like cu124, rocm6.3, etc.
  gpu_version = image.split('-')[-1]
@@ -693,7 +785,7 @@
  gpu_version=gpu_version,
  )
  logger.info(
- f"Using Torch version {torch_version} base image to build the Docker image"
+ f"Setup: Using Torch version {torch_version} base image to build the Docker image"
  )
  break
  if 'clarifai' not in dependencies:
@@ -1109,4 +1201,208 @@ def upload_model(folder, stage, skip_dockerfile):
  )

  input("Press Enter to continue...")
- builder.upload_model_version()
+ model_version = builder.upload_model_version()
+
+ # Ask user if they want to deploy the model
+ deploy_model = input("Do you want to deploy the model? (y/n): ")
+ if deploy_model.lower() != 'y':
+ logger.info("Model uploaded successfully. Skipping deployment setup.")
+ return
+
+ # Setup deployment for the uploaded model
+ setup_deployment_for_model(builder)
+
+
+ def setup_deployment_for_model(builder):
+ """
+ Set up deployment for a model after upload.
+
+ :param builder: The ModelBuilder instance that has uploaded the model.
+ """
+
+ model = builder.config.get('model')
+ user_id = model.get('user_id')
+ app_id = model.get('app_id')
+ model_id = model.get('id')
+
+ # Set up the API client with the user's credentials
+ user = User(user_id=user_id, pat=builder.client.pat, base_url=builder.client.base)
+
+ # Step 1: Check for available compute clusters and let user choose or create a new one
+ logger.info("Checking for available compute clusters...")
+ compute_clusters = list(user.list_compute_clusters())
+
+ compute_cluster = None
+ if compute_clusters:
+ logger.info("Available compute clusters:")
+ for i, cc in enumerate(compute_clusters):
+ logger.info(
+ f"{i + 1}. {cc.id} ({cc.description if hasattr(cc, 'description') else 'No description'})"
+ )
+
+ choice = input(
+ f"Choose a compute cluster (1-{len(compute_clusters)}) or 'n' to create a new one: "
+ )
+ if choice.lower() == 'n':
+ create_new_cc = True
+ else:
+ try:
+ idx = int(choice) - 1
+ if 0 <= idx < len(compute_clusters):
+ compute_cluster = compute_clusters[idx]
+ create_new_cc = False
+ else:
+ logger.info("Invalid choice. Creating a new compute cluster.")
+ create_new_cc = True
+ except ValueError:
+ logger.info("Invalid choice. Creating a new compute cluster.")
+ create_new_cc = True
+ else:
+ logger.info("No compute clusters found.")
+ create_new_cc = True
+
+ if create_new_cc:
+ # Provide URL to create a new compute cluster
+ url_helper = ClarifaiUrlHelper()
+ compute_cluster_url = f"{url_helper.ui}/settings/compute/new"
+ logger.info(f"Please create a new compute cluster by visiting: {compute_cluster_url}")
+
+ # Ask if they want to open the URL in browser
+ open_browser = input(
+ "Do you want to open the compute cluster creation page in your browser? (y/n): "
+ )
+ if open_browser.lower() == 'y':
+ try:
+ webbrowser.open(compute_cluster_url)
+ except Exception as e:
+ logger.error(f"Failed to open browser: {e}")
+
+ input("After creating the compute cluster, press Enter to continue...")
+
+ # Re-fetch the compute clusters list after user has created one
+ logger.info("Re-checking for available compute clusters...")
+ compute_clusters = list(user.list_compute_clusters())
+
+ if not compute_clusters:
+ logger.info(
+ "No compute clusters found. Please make sure you have created a compute cluster and try again."
+ )
+ return
+
+ # Show the updated list and let user choose
+ logger.info("Available compute clusters:")
+ for i, cc in enumerate(compute_clusters):
+ logger.info(
+ f"{i + 1}. {cc.id} ({cc.description if hasattr(cc, 'description') else 'No description'})"
+ )
+
+ choice = input(f"Choose a compute cluster (1-{len(compute_clusters)}): ")
+ try:
+ idx = int(choice) - 1
+ if 0 <= idx < len(compute_clusters):
+ compute_cluster = compute_clusters[idx]
+ else:
+ logger.info("Invalid choice. Aborting deployment setup.")
+ return
+ except ValueError:
+ logger.info("Invalid choice. Aborting deployment setup.")
+ return
+
+ # Step 2: Check for available nodepools and let user choose or create a new one
+ logger.info(f"Checking for available nodepools in compute cluster '{compute_cluster.id}'...")
+ nodepools = list(compute_cluster.list_nodepools())
+
+ nodepool = None
+ if nodepools:
+ logger.info("Available nodepools:")
+ for i, np in enumerate(nodepools):
+ logger.info(
+ f"{i + 1}. {np.id} ({np.description if hasattr(np, 'description') else 'No description'})"
+ )
+
+ choice = input(f"Choose a nodepool (1-{len(nodepools)}) or 'n' to create a new one: ")
+ if choice.lower() == 'n':
+ create_new_np = True
+ else:
+ try:
+ idx = int(choice) - 1
+ if 0 <= idx < len(nodepools):
+ nodepool = nodepools[idx]
+ create_new_np = False
+ else:
+ logger.info("Invalid choice. Creating a new nodepool.")
+ create_new_np = True
+ except ValueError:
+ logger.info("Invalid choice. Creating a new nodepool.")
+ create_new_np = True
+ else:
+ logger.info("No nodepools found in this compute cluster.")
+ create_new_np = True
+
+ if create_new_np:
+ # Provide URL to create a new nodepool
+ url_helper = ClarifaiUrlHelper()
+ nodepool_url = f"{url_helper.ui}/settings/compute/{compute_cluster.id}/nodepools/new"
+ logger.info(f"Please create a new nodepool by visiting: {nodepool_url}")
+
+ # Ask if they want to open the URL in browser
+ open_browser = input(
+ "Do you want to open the nodepool creation page in your browser? (y/n): "
+ )
+ if open_browser.lower() == 'y':
+ try:
+ webbrowser.open(nodepool_url)
+ except Exception as e:
+ logger.error(f"Failed to open browser: {e}")
+
+ input("After creating the nodepool, press Enter to continue...")
+
+ # Re-fetch the nodepools list after user has created one
+ logger.info(
+ f"Re-checking for available nodepools in compute cluster '{compute_cluster.id}'..."
+ )
+ nodepools = list(compute_cluster.list_nodepools())
+
+ if not nodepools:
+ logger.info(
+ "No nodepools found. Please make sure you have created a nodepool in the selected compute cluster and try again."
+ )
+ return
+
+ # Show the updated list and let user choose
+ logger.info("Available nodepools:")
+ for i, np in enumerate(nodepools):
+ logger.info(
+ f"{i + 1}. {np.id} ({np.description if hasattr(np, 'description') else 'No description'})"
+ )
+
+ choice = input(f"Choose a nodepool (1-{len(nodepools)}): ")
+ try:
+ idx = int(choice) - 1
+ if 0 <= idx < len(nodepools):
+ nodepool = nodepools[idx]
+ else:
+ logger.info("Invalid choice. Aborting deployment setup.")
+ return
+ except ValueError:
+ logger.info("Invalid choice. Aborting deployment setup.")
+ return
+
+ # Step 3: Help create a new deployment by providing URL
+ # Provide URL to create a new deployment
+ url_helper = ClarifaiUrlHelper()
+ deployment_url = f"{url_helper.ui}/settings/compute/deployments/new?computeClusterId={compute_cluster.id}&nodePoolId={nodepool.id}"
+ logger.info(f"Please create a new deployment by visiting: {deployment_url}")
+
+ # Ask if they want to open the URL in browser
+ open_browser = input(
+ "Do you want to open the deployment creation page in your browser? (y/n): "
+ )
+ if open_browser.lower() == 'y':
+ try:
+ webbrowser.open(deployment_url)
+ except Exception as e:
+ logger.error(f"Failed to open browser: {e}")
+
+ logger.info("After creating the deployment, your model will be ready for inference!")
+ logger.info(f"You can always return to view your deployments at: {deployment_url}")
clarifai/runners/models/openai_class.py CHANGED
@@ -20,6 +20,15 @@ class OpenAIModelClass(ModelClass):
  model = "gpt-4"
  """

+ # API Endpoints
+ ENDPOINT_CHAT_COMPLETIONS = "/chat/completions"
+ ENDPOINT_IMAGES_GENERATE = "/images/generations"
+ ENDPOINT_EMBEDDINGS = "/embeddings"
+ ENDPOINT_RESPONSES = "/responses"
+
+ # Default endpoint
+ DEFAULT_ENDPOINT = ENDPOINT_CHAT_COMPLETIONS
+
  # These should be overridden in subclasses
  client = None
  model = None
@@ -35,141 +44,100 @@ class OpenAIModelClass(ModelClass):
  "Subclasses must set the 'model' class attribute or ensure the client can list models"
  ) from e

- def _extract_request_params(self, request_data: Dict[str, Any]) -> Dict[str, Any]:
- """Extract and validate common openai arguments parameters from the request data.
-
- Args:
- request_data: The parsed JSON request data
-
- Returns:
- Dict containing the extracted parameters
- """
- return {
- "messages": request_data.get("messages", []),
- "temperature": request_data.get("temperature", 1.0),
- "max_tokens": request_data.get("max_tokens"),
- "max_completion_tokens": request_data.get("max_completion_tokens"),
- "n": request_data.get("n", 1),
- "frequency_penalty": request_data.get("frequency_penalty"),
- "presence_penalty": request_data.get("presence_penalty"),
- "top_p": request_data.get("top_p", 1.0),
- "reasoning_effort": request_data.get("reasoning_effort"),
- "response_format": request_data.get("response_format"),
- "stop": request_data.get("stop"),
- "tools": request_data.get("tools"),
- "tool_choice": request_data.get("tool_choice"),
- "tool_resources": request_data.get("tool_resources"),
- "modalities": request_data.get("modalities"),
- "stream_options": request_data.get("stream_options", {"include_usage": True}),
- }
-
- def _create_completion_args(
- self, params: Dict[str, Any], stream: bool = False
- ) -> Dict[str, Any]:
+ def _create_completion_args(self, params: Dict[str, Any]) -> Dict[str, Any]:
  """Create the completion arguments dictionary from parameters.

  Args:
  params: Dictionary of parameters extracted from request
- stream: Whether this is a streaming request

  Returns:
  Dict containing the completion arguments
  """
- completion_args = {
- "model": self.model,
- "messages": params["messages"],
- "temperature": params["temperature"],
- }
-
+ completion_args = {**params}
+ completion_args.update({"model": self.model})
+ stream = completion_args.pop("stream", False)
  if stream:
- completion_args["stream"] = True
- if params.get("stream_options"):
- completion_args["stream_options"] = params["stream_options"]
-
- # Add optional parameters if they exist
- optional_params = [
- "max_tokens",
- "max_completion_tokens",
- "n",
- "frequency_penalty",
- "presence_penalty",
- "top_p",
- "reasoning_effort",
- "response_format",
- "stop",
- "tools",
- "tool_choice",
- "tool_resources",
- "modalities",
- ]
-
- for param in optional_params:
- if params.get(param) is not None:
- completion_args[param] = params[param]
+ # Force to use usage
+ stream_options = params.pop("stream_options", {})
+ stream_options.update({"include_usage": True})
+ completion_args["stream_options"] = stream_options
+ completion_args["stream"] = stream

  return completion_args

- def _format_error_response(self, error: Exception) -> str:
- """Format an error response in OpenAI-compatible format.
+ def _set_usage(self, resp):
+ if resp.usage and resp.usage.prompt_tokens and resp.usage.completion_tokens:
+ self.set_output_context(
+ prompt_tokens=resp.usage.prompt_tokens,
+ completion_tokens=resp.usage.completion_tokens,
+ )
+
+ def _handle_chat_completions(self, request_data: Dict[str, Any]):
+ """Handle chat completion requests."""
+ completion_args = self._create_completion_args(request_data)
+ completion = self.client.chat.completions.create(**completion_args)
+ self._set_usage(completion)
+ return completion
+
+ def _handle_images_generate(self, request_data: Dict[str, Any]):
+ """Handle image generation requests."""
+ image_args = {**request_data}
+ image_args.update({"model": self.model})
+ response = self.client.images.generate(**image_args)
+ return response
+
+ def _handle_embeddings(self, request_data: Dict[str, Any]):
+ """Handle embedding requests."""
+ embedding_args = {**request_data}
+ embedding_args.update({"model": self.model})
+ response = self.client.embeddings.create(**embedding_args)
+ return response
+
+ def _handle_responses(self, request_data: Dict[str, Any]):
+ """Handle response requests."""
+ response_args = {**request_data}
+ response_args.update({"model": self.model})
+ response = self.client.responses.create(**response_args)
+ return response
+
+ def _route_request(self, endpoint: str, request_data: Dict[str, Any]):
+ """Route the request to appropriate handler based on endpoint."""
+ handlers = {
+ self.ENDPOINT_CHAT_COMPLETIONS: self._handle_chat_completions,
+ self.ENDPOINT_IMAGES_GENERATE: self._handle_images_generate,
+ self.ENDPOINT_EMBEDDINGS: self._handle_embeddings,
+ self.ENDPOINT_RESPONSES: self._handle_responses,
+ }

- Args:
- error: The exception that occurred
+ handler = handlers.get(endpoint)
+ if not handler:
+ raise ValueError(f"Unsupported endpoint: {endpoint}")

- Returns:
- JSON string containing the error response
- """
- error_response = {
- "error": {
- "message": str(error),
- "type": "InvalidRequestError",
- "code": "invalid_request_error",
- }
- }
- return json.dumps(error_response)
+ return handler(request_data)

  @ModelClass.method
  def openai_transport(self, msg: str) -> str:
- """The single model method to get the OpenAI-compatible request and send it to the OpenAI server
- then return its response.
+ """Process an OpenAI-compatible request and send it to the appropriate OpenAI endpoint.

  Args:
- msg: JSON string containing the request parameters
+ msg: JSON string containing the request parameters including 'openai_endpoint'

  Returns:
  JSON string containing the response or error
  """
  try:
  request_data = json.loads(msg)
- params = self._extract_request_params(request_data)
- stream = request_data.get("stream", False)
-
- if stream:
- chunks = self._process_streaming_request(**params)
- response_list = []
- for chunk in chunks:
- response_list.append(chunk)
- return json.dumps(response_list)
- else:
- completion = self._process_request(**params)
- if completion.get('usage'):
- if completion['usage'].get('prompt_tokens') and completion['usage'].get(
- 'completion_tokens'
- ):
- self.set_output_context(
- prompt_tokens=completion['usage']['prompt_tokens'],
- completion_tokens=completion['usage']['completion_tokens'],
- )
-
- return json.dumps(completion)
-
+ endpoint = request_data.pop("openai_endpoint", self.DEFAULT_ENDPOINT)
+ response = self._route_request(endpoint, request_data)
+ return json.dumps(response.model_dump())
  except Exception as e:
- return self._format_error_response(e)
+ return f"Error: {e}"

  @ModelClass.method
  def openai_stream_transport(self, msg: str) -> Iterator[str]:
  """Process an OpenAI-compatible request and return a streaming response iterator.
  This method is used when stream=True and returns an iterator of strings directly,
- without converting to a list or JSON serializing.
+ without converting to a list or JSON serializing. Supports chat completions and responses endpoints.

  Args:
  msg: The request as a JSON string.
@@ -179,43 +147,21 @@ class OpenAIModelClass(ModelClass):
  """
  try:
  request_data = json.loads(msg)
- params = self._extract_request_params(request_data)
- for chunk in self._process_streaming_request(**params):
- if chunk.get('usage'):
- if chunk['usage'].get('prompt_tokens') and chunk['usage'].get(
- 'completion_tokens'
- ):
- self.set_output_context(
- prompt_tokens=chunk['usage']['prompt_tokens'],
- completion_tokens=chunk['usage']['completion_tokens'],
- )
- yield json.dumps(chunk)
- except Exception as e:
- yield f"Error: {str(e)}"
-
- def _process_request(self, **kwargs) -> Any:
- """Process a standard (non-streaming) request using the OpenAI client.
-
- Args:
- **kwargs: Request parameters
-
- Returns:
- The completion response from the OpenAI client
- """
- completion_args = self._create_completion_args(kwargs)
- return self.client.chat.completions.create(**completion_args).to_dict()
-
- def _process_streaming_request(self, **kwargs) -> Iterator[str]:
- """Process a streaming request using the OpenAI client.
-
- Args:
- **kwargs: Request parameters
-
- Returns:
- Iterator yielding response chunks
- """
- completion_args = self._create_completion_args(kwargs, stream=True)
- completion_stream = self.client.chat.completions.create(**completion_args)
+ endpoint = request_data.pop("openai_endpoint", self.DEFAULT_ENDPOINT)
+ if endpoint not in [self.ENDPOINT_CHAT_COMPLETIONS, self.ENDPOINT_RESPONSES]:
+ raise ValueError("Streaming is only supported for chat completions and responses.")
+
+ if endpoint == self.ENDPOINT_RESPONSES:
+ # Handle responses endpoint
+ stream_response = self._route_request(endpoint, request_data)
+ for chunk in stream_response:
+ yield json.dumps(chunk.model_dump())
+ else:
+ completion_args = self._create_completion_args(request_data)
+ stream_completion = self.client.chat.completions.create(**completion_args)
+ for chunk in stream_completion:
+ self._set_usage(chunk)
+ yield json.dumps(chunk.model_dump())

- for chunk in completion_stream:
- yield chunk.to_dict()
+ except Exception as e:
+ yield f"Error: {e}"
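
Note: openai_transport now routes on an "openai_endpoint" field in the JSON payload, defaulting to /chat/completions, and returns either the JSON-serialized response or an "Error: ..." string. A sketch of the payload shape implied by this diff (the subclass instance and request values are hypothetical):

    import json

    payload = {
        "openai_endpoint": "/embeddings",  # one of the ENDPOINT_* constants; omit for chat completions
        "input": "some text to embed",     # remaining keys are passed through to the OpenAI client
    }
    result = my_openai_model.openai_transport(json.dumps(payload))  # my_openai_model: a subclass instance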
clarifai/runners/models/visual_classifier_class.py CHANGED
@@ -52,7 +52,7 @@ class VisualClassifierClass(ModelClass):

  @staticmethod
  def process_concepts(
- logits: torch.Tensor, threshold: float, model_labels: Dict[int, str]
+ logits: torch.Tensor, model_labels: Dict[int, str]
  ) -> List[List[Concept]]:
  """Convert model logits into a structured format of concepts.

clarifai/runners/utils/code_script.py CHANGED
@@ -76,7 +76,7 @@ client = OpenAI(
  response = client.chat.completions.create(
  model="%s",
  messages=[
- {"role": "developer", "content": "Talk like a pirate."},
+ {"role": "system", "content": "Talk like a pirate."},
  {
  "role": "user",
  "content": "How do I check if a Python object is an instance of a class?",
@@ -111,7 +111,7 @@ model = Model.from_current_context()"""
  else:
  model_ui_url = url_helper.clarifai_url(user_id, app_id, "models", model_id)
  model_section = f"""
- model = Model({model_ui_url},
+ model = Model("{model_ui_url}",
  deployment_id = {deployment_id}, # Only needed for dedicated deployed models
  {base_url_str}
  )
@@ -133,6 +133,8 @@ model = Model({model_ui_url},
  continue
  if default_value is None and required:
  default_value = _set_default_value(param_type)
+ if param_type == "str" and default_value is not None:
+ default_value = json.dumps(default_value)
  client_script_str += f"{param_name}={default_value}, "
  client_script_str = client_script_str.rstrip(", ") + ")"
  if method_signature.method_type == resources_pb2.RunnerMethodType.UNARY_UNARY:
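
Note: string default values in the generated client script are now passed through json.dumps, so they are rendered as quoted Python literals. A one-line illustration:

    import json

    param_name, default_value = "prompt", "hello"   # hypothetical parameter
    f"{param_name}={default_value}, "               # before: prompt=hello,   (unquoted)
    f"{param_name}={json.dumps(default_value)}, "   # after:  prompt="hello",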
clarifai/runners/utils/const.py CHANGED
@@ -2,9 +2,9 @@ import os

  registry = os.environ.get('CLARIFAI_BASE_IMAGE_REGISTRY', 'public.ecr.aws/clarifai-models')

- GIT_SHA = "b8ae56bf3b7c95e686ca002b07ca83d259c716eb"
+ GIT_SHA = "42938da8e33b0f37ee7db16b83631da94c2348b9"

- AMD_GIT_SHA = "81e942130173f54927e7c9a65aabc7e32780616d"
+ AMD_GIT_SHA = "42938da8e33b0f37ee7db16b83631da94c2348b9"

  PYTHON_BASE_IMAGE = registry + '/python-base:{python_version}-' + GIT_SHA
  TORCH_BASE_IMAGE = registry + '/torch:{torch_version}-py{python_version}-{gpu_version}-' + GIT_SHA
@@ -20,7 +20,7 @@ AMD_VLLM_BASE_IMAGE = (
  # List of available python base images
  AVAILABLE_PYTHON_IMAGES = ['3.11', '3.12']

- DEFAULT_PYTHON_VERSION = 3.12
+ DEFAULT_PYTHON_VERSION = '3.12'

  DEFAULT_AMD_TORCH_VERSION = '2.8.0.dev20250511+rocm6.4'

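Note: DEFAULT_PYTHON_VERSION is now the string '3.12' rather than the float 3.12. Floats drop trailing zeros when formatted, which matters for versions such as 3.10; a quick illustration:

    f"py{3.10}"    # -> 'py3.1'  (float loses the trailing zero)
    f"py{'3.10'}"  # -> 'py3.10' (string is preserved as written)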
clarifai/runners/utils/data_utils.py CHANGED
@@ -377,8 +377,14 @@ class Param(MessageData):

  if proto is None:
  proto = ParamProto()
- proto.default = json.dumps(default)
+
+ if isinstance(default, str):
+ proto.default = default
+ else:
+ proto.default = json.dumps(default)
+
  return proto
+
  except Exception:
  if default is not None:
  proto.default = str(default)
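
Note: string defaults on Param are now stored in proto.default as-is instead of being JSON-encoded, which avoids wrapping them in an extra pair of quotes. A short illustration of the difference:

    import json

    json.dumps("hello")  # -> '"hello"' (old behavior: quotes end up inside proto.default)
    "hello"              # -> 'hello'   (new behavior for str defaults)
    json.dumps(16000)    # -> '16000'   (non-string defaults are still JSON-encoded)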
clarifai/workflows/export.py CHANGED
@@ -16,12 +16,9 @@ def clean_up_unused_keys(wf: dict):
  new_wf["model"] = {
  "model_id": wf["model"]["id"],
  "model_version_id": wf["model"]["model_version"]["id"],
+ "user_id": wf["model"]["user_id"],
+ "app_id": wf["model"]["app_id"],
  }
- # If the model is not from clarifai main, add the app_id and user_id to the model dict.
- if wf["model"]["user_id"] != "clarifai" and wf["model"]["app_id"] != "main":
- new_wf["model"].update(
- {"app_id": wf["model"]["app_id"], "user_id": wf["model"]["user_id"]}
- )
  elif isinstance(val, dict):
  new_wf[key] = clean_up_unused_keys(val)
  elif isinstance(val, list):
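
Note: exported workflow nodes now always carry the model's user_id and app_id, not only for models outside clarifai/main. A sketch of the resulting model entry (field values are hypothetical):

    new_wf["model"] = {
        "model_id": "my-model",
        "model_version_id": "my-model-version-id",
        "user_id": "my-user",
        "app_id": "my-app",
    }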
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: clarifai
- Version: 11.4.10
+ Version: 11.5.1
  Home-page: https://github.com/Clarifai/clarifai-python
  Author: Clarifai
  Author-email: support@clarifai.com
@@ -19,8 +19,8 @@ Classifier: Operating System :: OS Independent
  Requires-Python: >=3.8
  Description-Content-Type: text/markdown
  License-File: LICENSE
- Requires-Dist: clarifai-grpc>=11.3.4
- Requires-Dist: clarifai-protocol>=0.0.23
+ Requires-Dist: clarifai-grpc>=11.5.5
+ Requires-Dist: clarifai-protocol>=0.0.24
  Requires-Dist: numpy>=1.22.0
  Requires-Dist: tqdm>=4.65.0
  Requires-Dist: PyYAML>=6.0.1
@@ -31,6 +31,8 @@ Requires-Dist: fsspec>=2024.6.1
  Requires-Dist: click>=8.1.7
  Requires-Dist: requests>=2.32.3
  Requires-Dist: aiohttp>=3.10.0
+ Requires-Dist: uv==0.7.12
+ Requires-Dist: ruff==0.11.4
  Provides-Extra: all
  Requires-Dist: pycocotools>=2.0.7; extra == "all"
  Dynamic: author
@@ -1,4 +1,4 @@
- clarifai/__init__.py,sha256=R3Gdk_es4xqWH9Q0o6LQE5mk81Nh9rli5JkK7PNOfDs,24
+ clarifai/__init__.py,sha256=A0jDR9AvI6Hw5Et83NIgHO8iH3ugm5KfZXEJRyobuHc,23
  clarifai/cli.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  clarifai/errors.py,sha256=GXa6D4v_L404J83jnRNFPH7s-1V9lk7w6Ws99f1g-AY,2772
  clarifai/versions.py,sha256=ecSuEB_nOL2XSoYHDw2n23XUbm_KPOGjudMXmQrGdS8,224
@@ -12,7 +12,7 @@ clarifai/cli/model.py,sha256=9sJ5p4TJc41vOeeFoMC3e8cciscSU7TmvAgW0wfO54U,30191
  clarifai/cli/model_templates.py,sha256=_ZonIBnY9KKSJY31KZbUys_uN_k_Txu7Dip12KWfmSU,9633
  clarifai/cli/nodepool.py,sha256=H6OIdUW_EiyDUwZogzEDoYmVwEjLMsgoDlPyE7gjIuU,4245
  clarifai/client/__init__.py,sha256=NhpNFRJY6mTi8ca-5hUeTEmYeDKHDNXY48FN63pDuos,703
- clarifai/client/app.py,sha256=D0FG9v07g1dExLnQsYt0OQjsJCkVvuw76BOpcqaCzfM,41380
+ clarifai/client/app.py,sha256=1M9XDsPWIEsj0g-mgIeZ9Mvkt85UHSbrv6pEr-QKfNg,41423
  clarifai/client/base.py,sha256=zOmB5HJP_-NmF2BPka14W7VUeJ1OF-fNxeacLsaRj3E,8775
  clarifai/client/compute_cluster.py,sha256=ViPyh-FibXL1J0ypsVOTaQnR1ymKohmZEuA13RwA-hc,10254
  clarifai/client/dataset.py,sha256=OgdpZkQ_vYmRxL8-qphcNozpvPV1bWTlte9Jv6UkKb8,35299
@@ -64,24 +64,24 @@ clarifai/modules/style.css,sha256=j7FNPZVhLPj35vvBksAJ90RuX5sLuqzDR5iM2WIEhiA,60
  clarifai/rag/__init__.py,sha256=wu3PzAzo7uqgrEzuaC9lY_3gj1HFiR3GU3elZIKTT5g,40
  clarifai/rag/rag.py,sha256=EG3GoFrHFCmA70Tz49_0Jo1-3WIaHSgWGHecPeErcdc,14170
  clarifai/rag/utils.py,sha256=_gVZdABuMnraCKViLruV75x0F3IpgFXN6amYSGE5_xc,4462
- clarifai/runners/__init__.py,sha256=CQhpUOj_x-oV9xEUKdL-hi3A1BQAtPUv-FFOev4a96w,281
+ clarifai/runners/__init__.py,sha256=wXLaSljH7qLeJCrZdKEnlQh2tNqTQAIZKWOu2rZ6wGs,279
  clarifai/runners/server.py,sha256=9qVAs8pRHmtyY0RCNIQ1uP8nqDADIFZ03LnkoDt1h4U,4692
- clarifai/runners/dockerfile_template/Dockerfile.template,sha256=5cjv7U8PmWa3DB_5B1CqSYh_6GE0E0np52TIAa7EIDE,2312
+ clarifai/runners/dockerfile_template/Dockerfile.template,sha256=DUH7F0-uLOV0LTjnPde-9chSzscAAxBAwjTxi9b_l9g,2425
  clarifai/runners/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- clarifai/runners/models/dummy_openai_model.py,sha256=5oa7r8j1GpymenpmMfaAV9Vt6VObKvrT9tDXgFmP4qY,7208
- clarifai/runners/models/mcp_class.py,sha256=7uwCMade0LMMBq7vczhPf4Kxdmh8Rj0R7Pg3pPxYdjQ,6386
- clarifai/runners/models/model_builder.py,sha256=PiqPyTGPSKsYvOQNpBzs4e1_wuEbtE-P3yEkLE4Py10,49231
+ clarifai/runners/models/dummy_openai_model.py,sha256=pcmAVbqTTGG4J3BLVjKfvM_SQ-GET_XexIUdLcr9Zvo,8373
+ clarifai/runners/models/mcp_class.py,sha256=RdKn7rW4vYol0VRDZiLTSMfkqjLhO1ijXAQ0Rq0Jfnw,6647
+ clarifai/runners/models/model_builder.py,sha256=s5TpCnLeiYLBgWHlCT6x6vpwTGX52oywViU6xDRl0MU,61599
  clarifai/runners/models/model_class.py,sha256=-euUF-eHUi4KXR_e1pIwvToDZ13CM6TSz2FolzildjM,16069
  clarifai/runners/models/model_run_locally.py,sha256=6-6WjEKc0ba3gAv4wOLdMs2XOzS3b-2bZHJS0wdVqJY,20088
  clarifai/runners/models/model_runner.py,sha256=SccX-RxTgruSpQaM21uMSl-z1x6fOa13fQZMQW8NNRY,7297
  clarifai/runners/models/model_servicer.py,sha256=rRd_fNEXwqiBSzTUtPI2r07EBdcCPd8tcSPHeqTe0_I,3445
- clarifai/runners/models/openai_class.py,sha256=3u6K7vTdYybxdTT3t3pYh9wvQyWhkL595t7hR5IkljU,8159
- clarifai/runners/models/visual_classifier_class.py,sha256=f9ZP8KFamMUdMpUG3AlL9nVCdcggy_E5n9RJY3ixR1U,2739
+ clarifai/runners/models/openai_class.py,sha256=aXlk5W6LWkh-A4eZYi74DeLW0i_86_9DYYGxpJHXI0w,6688
+ clarifai/runners/models/visual_classifier_class.py,sha256=1ZoLfCT2crrgRbejjTMAIwpTRgQMiH9N9yflOVpFxSg,2721
  clarifai/runners/models/visual_detector_class.py,sha256=ky4oFAkGCKPpGPdgaOso-n6D3HcmnbKee_8hBsNiV8U,2883
  clarifai/runners/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- clarifai/runners/utils/code_script.py,sha256=fiuwsY-uxmMzQXBv1V8Gpw0n6s_xzrigAk4J5htOAao,12646
- clarifai/runners/utils/const.py,sha256=Q4Ps6gIEJCyTdQCfmT6PaS61WHmhT25XigV1NugWz-E,1544
- clarifai/runners/utils/data_utils.py,sha256=4M4n-cGprBEBV5UkgOWaUlVfZ3WBTmegdffGQ3SfYCU,20750
+ clarifai/runners/utils/code_script.py,sha256=1n525IhMEdWlr9jBUG76tMa42piZtl0CmWYBmkvu368,12769
+ clarifai/runners/utils/const.py,sha256=B0TnmVgjh5NlsXIkowrlgz2QdsgLj4a-gu4Igc9ukCo,1546
+ clarifai/runners/utils/data_utils.py,sha256=HRpMYR2O0OiDpXXhOManLHTeomC4bFnXMHVAiT_12yE,20856
  clarifai/runners/utils/loader.py,sha256=K5Y8MPbIe5STw2gDnrL8KqFgKNxEo7bz-RV0ip1T4PM,10900
  clarifai/runners/utils/method_signatures.py,sha256=qdHaO8ZIgP6BBXXMhMPhcQ46dse-XMP2t4VJCNG7O3Q,18335
  clarifai/runners/utils/openai_convertor.py,sha256=ZlIrvvfHttD_DavLvmKZdL8gNq_TQvQtZVnYamwdWz4,8248
@@ -104,12 +104,12 @@ clarifai/utils/evaluation/helpers.py,sha256=0t6eIDXeZEoiVvnmHTnsIF_-v4BzrQW1hFaq
  clarifai/utils/evaluation/main.py,sha256=N_sfRuMjHrUeuWN0Pzms65M1PbkQkgYg3WoQVaDR1Jw,17764
  clarifai/utils/evaluation/testset_annotation_parser.py,sha256=Nmodfi5BYFYEbybWcC8tmU5-wtwRBsWIbnpd3OvKSmA,5414
  clarifai/workflows/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- clarifai/workflows/export.py,sha256=Oq3RVNKvv1iH46U6oIjXa-MXWJ4sTlXr_NSfwoxr3H4,2149
+ clarifai/workflows/export.py,sha256=HvUYG9N_-UZoRR0-_tdGbZ950_AeBqawSppgUxQebR0,1913
  clarifai/workflows/utils.py,sha256=ESL3INcouNcLKCh-nMpfXX-YbtCzX7tz7hT57_RGQ3M,2079
  clarifai/workflows/validate.py,sha256=UhmukyHkfxiMFrPPeBdUTiCOHQT5-shqivlBYEyKTlU,2931
- clarifai-11.4.10.dist-info/licenses/LICENSE,sha256=mUqF_d12-qE2n41g7C5_sq-BMLOcj6CNN-jevr15YHU,555
- clarifai-11.4.10.dist-info/METADATA,sha256=fsyNNmQS46MJ-hkKLiw2yK23x516-GOjPrm0CNOcNrI,22683
- clarifai-11.4.10.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- clarifai-11.4.10.dist-info/entry_points.txt,sha256=X9FZ4Z-i_r2Ud1RpZ9sNIFYuu_-9fogzCMCRUD9hyX0,51
- clarifai-11.4.10.dist-info/top_level.txt,sha256=wUMdCQGjkxaynZ6nZ9FAnvBUCgp5RJUVFSy2j-KYo0s,9
- clarifai-11.4.10.dist-info/RECORD,,
+ clarifai-11.5.1.dist-info/licenses/LICENSE,sha256=mUqF_d12-qE2n41g7C5_sq-BMLOcj6CNN-jevr15YHU,555
+ clarifai-11.5.1.dist-info/METADATA,sha256=-TWdB_qQgO1GbJrjYq6dpEn8wJVgftsyK_E664VBEgU,22736
+ clarifai-11.5.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ clarifai-11.5.1.dist-info/entry_points.txt,sha256=X9FZ4Z-i_r2Ud1RpZ9sNIFYuu_-9fogzCMCRUD9hyX0,51
+ clarifai-11.5.1.dist-info/top_level.txt,sha256=wUMdCQGjkxaynZ6nZ9FAnvBUCgp5RJUVFSy2j-KYo0s,9
+ clarifai-11.5.1.dist-info/RECORD,,