clarifai-11.5.6-py3-none-any.whl → clarifai-11.6.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
clarifai/__init__.py CHANGED
@@ -1 +1 @@
-__version__ = "11.5.6"
+__version__ = "11.6.0"
clarifai/cli/model.py CHANGED
@@ -120,14 +120,22 @@ def init(model_path, model_type_id):
     is_flag=True,
     help='Flag to skip generating a dockerfile so that you can manually edit an already created dockerfile.',
 )
-def upload(model_path, stage, skip_dockerfile):
+@click.pass_context
+def upload(ctx, model_path, stage, skip_dockerfile):
     """Upload a model to Clarifai.

     MODEL_PATH: Path to the model directory. If not specified, the current directory is used by default.
     """
     from clarifai.runners.models.model_builder import upload_model

-    upload_model(model_path, stage, skip_dockerfile)
+    validate_context(ctx)
+    upload_model(
+        model_path,
+        stage,
+        skip_dockerfile,
+        pat=ctx.obj.current.pat,
+        base_url=ctx.obj.current.api_base,
+    )


 @model.command()
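The CLI command now threads the active context's PAT and API base into the upload instead of relying solely on environment variables. For scripts that bypass the CLI, the same thing can be done by calling upload_model directly — a minimal sketch, where the model folder and PAT are placeholders and omitting pat/base_url preserves the old environment-variable behavior:

# Hypothetical programmatic equivalent of `clarifai model upload`.
from clarifai.runners.models.model_builder import upload_model

upload_model(
    "./my_model",            # placeholder: folder with model.py, config.yaml, requirements.txt
    stage="upload",          # download checkpoints configured for the upload stage
    skip_dockerfile=False,   # still generate a Dockerfile
    pat="YOUR_PAT",          # placeholder; falls back to CLARIFAI_PAT when None
    base_url="https://api.clarifai.com",  # falls back to CLARIFAI_API_BASE when None
)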
clarifai/cli/pipeline.py CHANGED
@@ -27,6 +27,146 @@ def upload(path):
     upload_pipeline(path)


+@pipeline.command()
+@click.option(
+    '--config',
+    type=click.Path(exists=True),
+    required=False,
+    help='Path to the pipeline run config file.',
+)
+@click.option('--pipeline_id', required=False, help='Pipeline ID to run.')
+@click.option('--pipeline_version_id', required=False, help='Pipeline Version ID to run.')
+@click.option(
+    '--pipeline_version_run_id',
+    required=False,
+    help='Pipeline Version Run ID. If not provided, a UUID will be generated.',
+)
+@click.option('--user_id', required=False, help='User ID of the pipeline.')
+@click.option('--app_id', required=False, help='App ID that contains the pipeline.')
+@click.option('--nodepool_id', required=False, help='Nodepool ID to run the pipeline on.')
+@click.option(
+    '--compute_cluster_id', required=False, help='Compute Cluster ID to run the pipeline on.'
+)
+@click.option('--pipeline_url', required=False, help='Pipeline URL to run.')
+@click.option(
+    '--timeout',
+    type=int,
+    default=3600,
+    help='Maximum time to wait for completion in seconds. Default 3600 (1 hour).',
+)
+@click.option(
+    '--monitor_interval',
+    type=int,
+    default=10,
+    help='Interval between status checks in seconds. Default 10.',
+)
+@click.option(
+    '--log_file',
+    type=click.Path(),
+    required=False,
+    help='Path to file where logs should be written. If not provided, logs are displayed on console.',
+)
+@click.option(
+    '--monitor',
+    is_flag=True,
+    default=False,
+    help='Monitor an existing pipeline run instead of starting a new one. Requires pipeline_version_run_id.',
+)
+@click.pass_context
+def run(
+    ctx,
+    config,
+    pipeline_id,
+    pipeline_version_id,
+    pipeline_version_run_id,
+    user_id,
+    app_id,
+    nodepool_id,
+    compute_cluster_id,
+    pipeline_url,
+    timeout,
+    monitor_interval,
+    log_file,
+    monitor,
+):
+    """Run a pipeline and monitor its progress."""
+    import json
+
+    from clarifai.client.pipeline import Pipeline
+    from clarifai.utils.cli import from_yaml, validate_context
+
+    validate_context(ctx)
+
+    if config:
+        config_data = from_yaml(config)
+        pipeline_id = config_data.get('pipeline_id', pipeline_id)
+        pipeline_version_id = config_data.get('pipeline_version_id', pipeline_version_id)
+        pipeline_version_run_id = config_data.get(
+            'pipeline_version_run_id', pipeline_version_run_id
+        )
+        user_id = config_data.get('user_id', user_id)
+        app_id = config_data.get('app_id', app_id)
+        nodepool_id = config_data.get('nodepool_id', nodepool_id)
+        compute_cluster_id = config_data.get('compute_cluster_id', compute_cluster_id)
+        pipeline_url = config_data.get('pipeline_url', pipeline_url)
+        timeout = config_data.get('timeout', timeout)
+        monitor_interval = config_data.get('monitor_interval', monitor_interval)
+        log_file = config_data.get('log_file', log_file)
+        monitor = config_data.get('monitor', monitor)
+
+    # compute_cluster_id and nodepool_id are mandatory regardless of whether pipeline_url is provided
+    if not compute_cluster_id or not nodepool_id:
+        raise ValueError("--compute_cluster_id and --nodepool_id are mandatory parameters.")
+
+    # When monitor flag is used, pipeline_version_run_id is mandatory
+    if monitor and not pipeline_version_run_id:
+        raise ValueError("--pipeline_version_run_id is required when using --monitor flag.")
+
+    if pipeline_url:
+        # When using pipeline_url, other parameters are optional (will be parsed from URL)
+        required_params_provided = True
+    else:
+        # When not using pipeline_url, all individual parameters are required
+        required_params_provided = all([pipeline_id, user_id, app_id, pipeline_version_id])
+
+    if not required_params_provided:
+        raise ValueError(
+            "Either --user_id & --app_id & --pipeline_id & --pipeline_version_id or --pipeline_url must be provided."
+        )
+
+    if pipeline_url:
+        pipeline = Pipeline(
+            url=pipeline_url,
+            pat=ctx.obj.current.pat,
+            base_url=ctx.obj.current.api_base,
+            pipeline_version_run_id=pipeline_version_run_id,
+            nodepool_id=nodepool_id,
+            compute_cluster_id=compute_cluster_id,
+            log_file=log_file,
+        )
+    else:
+        pipeline = Pipeline(
+            pipeline_id=pipeline_id,
+            pipeline_version_id=pipeline_version_id,
+            pipeline_version_run_id=pipeline_version_run_id,
+            user_id=user_id,
+            app_id=app_id,
+            nodepool_id=nodepool_id,
+            compute_cluster_id=compute_cluster_id,
+            pat=ctx.obj.current.pat,
+            base_url=ctx.obj.current.api_base,
+            log_file=log_file,
+        )
+
+    if monitor:
+        # Monitor existing pipeline run instead of starting new one
+        result = pipeline.monitor_only(timeout=timeout, monitor_interval=monitor_interval)
+    else:
+        # Start new pipeline run and monitor it
+        result = pipeline.run(timeout=timeout, monitor_interval=monitor_interval)
+    click.echo(json.dumps(result, indent=2, default=str))
+
+
 @pipeline.command()
 @click.argument(
     "pipeline_path",
clarifai/client/__init__.py CHANGED
@@ -7,6 +7,7 @@ from clarifai.client.input import Inputs
 from clarifai.client.lister import Lister
 from clarifai.client.model import Model
 from clarifai.client.module import Module
+from clarifai.client.pipeline import Pipeline
 from clarifai.client.search import Search
 from clarifai.client.user import User
 from clarifai.client.workflow import Workflow
@@ -18,6 +19,7 @@ __all__ = [
     'App',
     'Model',
     'Workflow',
+    'Pipeline',
     'Module',
     'Lister',
     'Dataset',
clarifai/client/pipeline.py ADDED
@@ -0,0 +1,312 @@
+import time
+import uuid
+from typing import Dict, List
+
+from clarifai_grpc.grpc.api import resources_pb2, service_pb2
+from clarifai_grpc.grpc.api.status import status_code_pb2
+
+from clarifai.client.base import BaseClient
+from clarifai.client.lister import Lister
+from clarifai.errors import UserError
+from clarifai.urls.helper import ClarifaiUrlHelper
+from clarifai.utils.constants import DEFAULT_BASE
+from clarifai.utils.logging import logger
+
+
+def _get_status_name(status_code: int) -> str:
+    """Get the human-readable name for a status code."""
+    status_mapping = {
+        # Job status codes (these are the actual values based on the error message showing 64001)
+        64001: "JOB_QUEUED",
+        64002: "JOB_RUNNING",
+        64003: "JOB_COMPLETED",
+        64004: "JOB_FAILED",
+        64005: "JOB_UNEXPECTED_ERROR",
+        # Standard status codes
+        10000: "SUCCESS",
+        10010: "MIXED_STATUS",
+    }
+    return status_mapping.get(status_code, f"UNKNOWN_STATUS_{status_code}")
+
+
+class Pipeline(Lister, BaseClient):
+    """Pipeline is a class that provides access to Clarifai API endpoints related to Pipeline information."""
+
+    def __init__(
+        self,
+        url: str = None,
+        pipeline_id: str = None,
+        pipeline_version_id: str = None,
+        pipeline_version_run_id: str = None,
+        user_id: str = None,
+        app_id: str = None,
+        nodepool_id: str = None,
+        compute_cluster_id: str = None,
+        log_file: str = None,
+        base_url: str = DEFAULT_BASE,
+        pat: str = None,
+        token: str = None,
+        root_certificates_path: str = None,
+        **kwargs,
+    ):
+        """Initializes a Pipeline object.
+
+        Args:
+            url (str): The URL to initialize the pipeline object.
+            pipeline_id (str): The Pipeline ID to interact with.
+            pipeline_version_id (str): The Pipeline Version ID to interact with.
+            pipeline_version_run_id (str): The Pipeline Version Run ID. If not provided, a UUID will be generated.
+            user_id (str): The User ID that owns the pipeline.
+            app_id (str): The App ID that contains the pipeline.
+            nodepool_id (str): The Nodepool ID to run the pipeline on.
+            compute_cluster_id (str): The Compute Cluster ID to run the pipeline on.
+            log_file (str): Path to file where logs should be written. If not provided, logs are displayed on console.
+            base_url (str): Base API url. Default "https://api.clarifai.com"
+            pat (str): A personal access token for authentication. Can be set as env var CLARIFAI_PAT
+            token (str): A session token for authentication. Accepts either a session token or a pat. Can be set as env var CLARIFAI_SESSION_TOKEN
+            root_certificates_path (str): Path to the SSL root certificates file, used to establish secure gRPC connections.
+            **kwargs: Additional keyword arguments to be passed to the Pipeline.
+        """
+        if url and pipeline_id:
+            raise UserError("You can only specify one of url or pipeline_id.")
+        if not url and not pipeline_id:
+            raise UserError("You must specify one of url or pipeline_id.")
+        if url:
+            parsed_user_id, parsed_app_id, _, parsed_pipeline_id, parsed_version_id = (
+                ClarifaiUrlHelper.split_clarifai_url(url)
+            )
+            user_id = user_id or parsed_user_id
+            app_id = app_id or parsed_app_id
+            pipeline_id = parsed_pipeline_id
+            pipeline_version_id = pipeline_version_id or parsed_version_id
+
+        self.pipeline_id = pipeline_id
+        self.pipeline_version_id = pipeline_version_id
+        self.pipeline_version_run_id = pipeline_version_run_id or str(uuid.uuid4())
+        self.user_id = user_id
+        self.app_id = app_id
+        self.nodepool_id = nodepool_id
+        self.compute_cluster_id = compute_cluster_id
+        self.log_file = log_file
+
+        BaseClient.__init__(
+            self,
+            user_id=user_id,
+            app_id=app_id,
+            base=base_url,
+            pat=pat,
+            token=token,
+            root_certificates_path=root_certificates_path,
+        )
+        Lister.__init__(self)
+
+        # Set up runner selector if compute cluster and nodepool are provided
+        self._runner_selector = None
+        if self.compute_cluster_id and self.nodepool_id:
+            from clarifai.client.nodepool import Nodepool
+
+            self._runner_selector = Nodepool.get_runner_selector(
+                user_id=self.user_id,
+                compute_cluster_id=self.compute_cluster_id,
+                nodepool_id=self.nodepool_id,
+            )
+
+    def run(self, inputs: List = None, timeout: int = 3600, monitor_interval: int = 10) -> Dict:
+        """Run the pipeline and monitor its progress.
+
+        Args:
+            inputs (List): List of inputs to run the pipeline with. If None, runs without inputs.
+            timeout (int): Maximum time to wait for completion in seconds. Default 3600 (1 hour).
+            monitor_interval (int): Interval between status checks in seconds. Default 10.
+
+        Returns:
+            Dict: The pipeline run result.
+        """
+        # Create a new pipeline version run
+        pipeline_version_run = resources_pb2.PipelineVersionRun()
+        pipeline_version_run.id = self.pipeline_version_run_id
+
+        # Set nodepools if nodepool information is available
+        if self.nodepool_id and self.compute_cluster_id:
+            nodepool = resources_pb2.Nodepool(
+                id=self.nodepool_id,
+                compute_cluster=resources_pb2.ComputeCluster(
+                    id=self.compute_cluster_id, user_id=self.user_id
+                ),
+            )
+            pipeline_version_run.nodepools.extend([nodepool])
+
+        run_request = service_pb2.PostPipelineVersionRunsRequest()
+        run_request.user_app_id.CopyFrom(self.user_app_id)
+        run_request.pipeline_id = self.pipeline_id
+        run_request.pipeline_version_id = self.pipeline_version_id or ""
+        run_request.pipeline_version_runs.append(pipeline_version_run)
+
+        # Add runner selector if available
+        if self._runner_selector:
+            run_request.runner_selector.CopyFrom(self._runner_selector)
+
+        logger.info(f"Starting pipeline run for pipeline {self.pipeline_id}")
+        response = self.STUB.PostPipelineVersionRuns(
+            run_request, metadata=self.auth_helper.metadata
+        )
+
+        if response.status.code != status_code_pb2.StatusCode.SUCCESS:
+            raise UserError(
+                f"Failed to start pipeline run: {response.status.description}. Details: {response.status.details}"
+            )
+
+        if not response.pipeline_version_runs:
+            raise UserError("No pipeline version run was created")
+
+        pipeline_version_run = response.pipeline_version_runs[0]
+        run_id = pipeline_version_run.id or self.pipeline_version_run_id
+
+        logger.info(f"Pipeline version run created with ID: {run_id}")
+
+        # Monitor the run
+        return self._monitor_pipeline_run(run_id, timeout, monitor_interval)
+
+    def monitor_only(self, timeout: int = 3600, monitor_interval: int = 10) -> Dict:
+        """Monitor an existing pipeline run without starting a new one.
+
+        Args:
+            timeout (int): Maximum time to wait for completion in seconds. Default 3600 (1 hour).
+            monitor_interval (int): Interval between status checks in seconds. Default 10.
+
+        Returns:
+            Dict: The pipeline run result.
+        """
+        if not self.pipeline_version_run_id:
+            raise UserError("pipeline_version_run_id is required for monitoring existing runs")
+
+        logger.info(f"Monitoring existing pipeline run with ID: {self.pipeline_version_run_id}")
+
+        # Monitor the existing run
+        return self._monitor_pipeline_run(self.pipeline_version_run_id, timeout, monitor_interval)
+
+    def _monitor_pipeline_run(self, run_id: str, timeout: int, monitor_interval: int) -> Dict:
+        """Monitor a pipeline version run until completion.
+
+        Args:
+            run_id (str): The pipeline version run ID to monitor.
+            timeout (int): Maximum time to wait for completion in seconds.
+            monitor_interval (int): Interval between status checks in seconds.
+
+        Returns:
+            Dict: The pipeline run result.
+        """
+        start_time = time.time()
+        seen_logs = set()
+
+        while time.time() - start_time < timeout:
+            # Get run status
+            get_run_request = service_pb2.GetPipelineVersionRunRequest()
+            get_run_request.user_app_id.CopyFrom(self.user_app_id)
+            get_run_request.pipeline_id = self.pipeline_id
+            get_run_request.pipeline_version_id = self.pipeline_version_id or ""
+            get_run_request.pipeline_version_run_id = run_id
+
+            try:
+                run_response = self.STUB.GetPipelineVersionRun(
+                    get_run_request, metadata=self.auth_helper.metadata
+                )
+
+                if run_response.status.code != status_code_pb2.StatusCode.SUCCESS:
+                    logger.error(f"Error getting run status: {run_response.status.description}")
+                    time.sleep(monitor_interval)
+                    continue
+
+                pipeline_run = run_response.pipeline_version_run
+
+                # Display new log entries
+                self._display_new_logs(run_id, seen_logs)
+
+                elapsed_time = time.time() - start_time
+                logger.info(f"Pipeline run monitoring... (elapsed {elapsed_time:.1f}s)")
+
+                # Check if we have orchestration status
+                if (
+                    hasattr(pipeline_run, 'orchestration_status')
+                    and pipeline_run.orchestration_status
+                ):
+                    orch_status = pipeline_run.orchestration_status
+                    if hasattr(orch_status, 'status') and orch_status.status:
+                        status_code = orch_status.status.code
+                        status_name = _get_status_name(status_code)
+                        logger.info(f"Pipeline run status: {status_code} ({status_name})")
+
+                        # Display orchestration status details if available
+                        if hasattr(orch_status, 'description') and orch_status.description:
+                            logger.info(f"Orchestration status: {orch_status.description}")
+
+                        # Success codes that allow continuation: JOB_RUNNING, JOB_QUEUED
+                        if status_code in [64001, 64002]:  # JOB_QUEUED, JOB_RUNNING
+                            logger.info(f"Pipeline run in progress: {status_code} ({status_name})")
+                            # Continue monitoring
+                        # Successful terminal state: JOB_COMPLETED
+                        elif status_code == 64003:  # JOB_COMPLETED
+                            logger.info("Pipeline run completed successfully!")
+                            return {"status": "success", "pipeline_version_run": pipeline_run}
+                        # Failure terminal states: JOB_UNEXPECTED_ERROR, JOB_FAILED
+                        elif status_code in [64004, 64005]:  # JOB_FAILED, JOB_UNEXPECTED_ERROR
+                            logger.error(
+                                f"Pipeline run failed with status: {status_code} ({status_name})"
+                            )
+                            return {"status": "failed", "pipeline_version_run": pipeline_run}
+                        # Handle legacy SUCCESS status for backward compatibility
+                        elif status_code == status_code_pb2.StatusCode.SUCCESS:
+                            logger.info("Pipeline run completed successfully!")
+                            return {"status": "success", "pipeline_version_run": pipeline_run}
+                        elif status_code != status_code_pb2.StatusCode.MIXED_STATUS:
+                            # Log other unexpected statuses but continue monitoring
+                            logger.warning(
+                                f"Unexpected pipeline run status: {status_code} ({status_name}). Continuing to monitor..."
+                            )
+
+            except Exception as e:
+                logger.error(f"Error monitoring pipeline run: {e}")
+
+            time.sleep(monitor_interval)
+
+        logger.error(f"Pipeline run timed out after {timeout} seconds")
+        return {"status": "timeout"}
+
+    def _display_new_logs(self, run_id: str, seen_logs: set):
+        """Display new log entries for a pipeline version run.
+
+        Args:
+            run_id (str): The pipeline version run ID.
+            seen_logs (set): Set of already seen log entry IDs.
+        """
+        try:
+            logs_request = service_pb2.ListLogEntriesRequest()
+            logs_request.user_app_id.CopyFrom(self.user_app_id)
+            logs_request.pipeline_id = self.pipeline_id
+            logs_request.pipeline_version_id = self.pipeline_version_id or ""
+            logs_request.pipeline_version_run_id = run_id
+            logs_request.log_type = "pipeline.version.run"  # Set required log type
+            logs_request.page = 1
+            logs_request.per_page = 50
+
+            logs_response = self.STUB.ListLogEntries(
+                logs_request, metadata=self.auth_helper.metadata
+            )
+
+            if logs_response.status.code == status_code_pb2.StatusCode.SUCCESS:
+                for log_entry in logs_response.log_entries:
+                    # Use log entry URL or timestamp as unique identifier
+                    log_id = log_entry.url or f"{log_entry.created_at.seconds}_{log_entry.message}"
+                    if log_id not in seen_logs:
+                        seen_logs.add(log_id)
+                        log_message = f"[LOG] {log_entry.message.strip()}"
+
+                        # Write to file if log_file is specified, otherwise log to console
+                        if self.log_file:
+                            with open(self.log_file, 'a', encoding='utf-8') as f:
+                                f.write(log_message + '\n')
+                        else:
+                            logger.info(log_message)
+
+        except Exception as e:
+            logger.debug(f"Error fetching logs: {e}")
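The new class has two entry points: run() starts a pipeline version run and monitors it to a terminal state, while monitor_only() attaches to a run already in flight. A minimal usage sketch against the constructor above — the URL, IDs, and PAT are placeholders:

from clarifai.client import Pipeline  # re-exported via clarifai/client/__init__.py above

# Start a new run; user/app/pipeline IDs are parsed from the URL.
pipeline = Pipeline(
    url="https://clarifai.com/me/my-app/pipelines/my-pipeline",  # placeholder URL
    pat="YOUR_PAT",                   # placeholder; or set CLARIFAI_PAT
    compute_cluster_id="my-cluster",  # placeholder compute cluster
    nodepool_id="my-nodepool",        # placeholder nodepool
)
result = pipeline.run(timeout=3600, monitor_interval=10)
print(result["status"])  # "success", "failed", or "timeout"

# Or attach to an existing run without starting a new one.
existing = Pipeline(
    pipeline_id="my-pipeline",
    pipeline_version_id="v1",           # placeholder version
    pipeline_version_run_id="run-123",  # required by monitor_only()
    user_id="me",
    app_id="my-app",
    pat="YOUR_PAT",
)
existing.monitor_only(timeout=600)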
clarifai/runners/models/model_builder.py CHANGED
@@ -73,6 +73,8 @@ class ModelBuilder:
         validate_api_ids: bool = True,
         download_validation_only: bool = False,
         app_not_found_action: Literal["auto_create", "prompt", "error"] = "error",
+        pat: str = None,
+        base_url: str = None,
     ):
         """
         :param folder: The folder containing the model.py, config.yaml, requirements.txt and
@@ -83,12 +85,16 @@ class ModelBuilder:
            just downloading a checkpoint.
        :param app_not_found_action: Defines how to handle the case when the app is not found.
            Options: 'auto_create' - create automatically, 'prompt' - ask user, 'error' - raise exception.
+       :param pat: Personal access token for authentication. If None, will use environment variables.
+       :param base_url: Base URL for the API. If None, will use environment variables.
        """
        assert app_not_found_action in ["auto_create", "prompt", "error"], ValueError(
            f"Expected one of {['auto_create', 'prompt', 'error']}, got {app_not_found_action=}"
        )
        self.app_not_found_action = app_not_found_action
        self._client = None
+       self._pat = pat
+       self._base_url = base_url
        if not validate_api_ids:  # for backwards compatibility
            download_validation_only = True
        self.download_validation_only = download_validation_only
@@ -487,8 +493,20 @@ class ModelBuilder:
        user_id = model.get('user_id')
        app_id = model.get('app_id')

-       self._base_api = os.environ.get('CLARIFAI_API_BASE', 'https://api.clarifai.com')
-       self._client = BaseClient(user_id=user_id, app_id=app_id, base=self._base_api)
+       # Use context parameters if provided, otherwise fall back to environment variables
+       self._base_api = (
+           self._base_url
+           if self._base_url
+           else os.environ.get('CLARIFAI_API_BASE', 'https://api.clarifai.com')
+       )
+
+       # Create BaseClient with explicit pat parameter if provided
+       if self._pat:
+           self._client = BaseClient(
+               user_id=user_id, app_id=app_id, base=self._base_api, pat=self._pat
+           )
+       else:
+           self._client = BaseClient(user_id=user_id, app_id=app_id, base=self._base_api)

        return self._client

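The effect of the two new constructor parameters is visible in the client-creation code above: an explicit base_url takes precedence over CLARIFAI_API_BASE, and an explicit pat over ambient credentials, while leaving both as None preserves the old env-var behavior. A minimal sketch with placeholder values:

from clarifai.runners.models.model_builder import ModelBuilder

# Explicit credentials win over CLARIFAI_API_BASE / CLARIFAI_PAT.
builder = ModelBuilder(
    "./my_model",                    # placeholder model folder
    app_not_found_action="prompt",
    pat="YOUR_PAT",                  # placeholder token
    base_url="https://api.clarifai.com",
)

# With pat/base_url omitted, the builder reads env vars as before.
env_builder = ModelBuilder("./my_model", app_not_found_action="error")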
@@ -1226,15 +1244,17 @@
     return False


-def upload_model(folder, stage, skip_dockerfile):
+def upload_model(folder, stage, skip_dockerfile, pat=None, base_url=None):
     """
     Uploads a model to Clarifai.

     :param folder: The folder containing the model files.
     :param stage: The stage we are calling download checkpoints from. Typically this would "upload" and will download checkpoints if config.yaml checkpoints section has when set to "upload". Other options include "runtime" to be used in load_model or "upload" to be used during model upload. Set this stage to whatever you have in config.yaml to force downloading now.
     :param skip_dockerfile: If True, will not create a Dockerfile.
+    :param pat: Personal access token for authentication. If None, will use environment variables.
+    :param base_url: Base URL for the API. If None, will use environment variables.
     """
-    builder = ModelBuilder(folder, app_not_found_action="prompt")
+    builder = ModelBuilder(folder, app_not_found_action="prompt", pat=pat, base_url=base_url)
     builder.download_checkpoints(stage=stage)
     if not skip_dockerfile:
         builder.create_dockerfile()
clarifai/runners/server.py CHANGED
@@ -28,7 +28,7 @@ def main():
     parser.add_argument(
         '--pool_size',
         type=int,
-        default=32,
+        default=os.environ.get('CLARIFAI_NUM_THREADS', 32),
         help="The number of threads to use for the gRPC server.",
         choices=range(1, 129),
     )  # pylint: disable=range-builtin-not-iterating
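One subtlety: os.environ.get returns a string when CLARIFAI_NUM_THREADS is set, but argparse applies the type=int converter to string defaults, so pool_size is an int either way. A small self-contained check — the env var name matches the diff; the rest is illustrative:

import argparse
import os

os.environ['CLARIFAI_NUM_THREADS'] = '64'  # simulate the variable being set

parser = argparse.ArgumentParser()
parser.add_argument(
    '--pool_size',
    type=int,
    # A string default is passed through type=int by argparse itself.
    default=os.environ.get('CLARIFAI_NUM_THREADS', 32),
)
args = parser.parse_args([])
print(args.pool_size, type(args.pool_size))  # 64 <class 'int'>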
clarifai-11.5.6.dist-info/METADATA → clarifai-11.6.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: clarifai
-Version: 11.5.6
+Version: 11.6.0
 Home-page: https://github.com/Clarifai/clarifai-python
 Author: Clarifai
 Author-email: support@clarifai.com
@@ -20,7 +20,7 @@ Requires-Python: >=3.8
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: clarifai-grpc>=11.5.5
-Requires-Dist: clarifai-protocol>=0.0.24
+Requires-Dist: clarifai-protocol>=0.0.25
 Requires-Dist: numpy>=1.22.0
 Requires-Dist: tqdm>=4.65.0
 Requires-Dist: PyYAML>=6.0.1
clarifai-11.5.6.dist-info/RECORD → clarifai-11.6.0.dist-info/RECORD CHANGED
@@ -1,4 +1,4 @@
-clarifai/__init__.py,sha256=BxuXNC41hShXWv3SgslBYu8ufM3yw21Lbp4jemirGjY,23
+clarifai/__init__.py,sha256=kjhthIE5r_ArzQulbcqnrRj4ewlV2jNJYfo-PPrxloM,23
 clarifai/cli.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/errors.py,sha256=GXa6D4v_L404J83jnRNFPH7s-1V9lk7w6Ws99f1g-AY,2772
 clarifai/versions.py,sha256=ecSuEB_nOL2XSoYHDw2n23XUbm_KPOGjudMXmQrGdS8,224
@@ -8,15 +8,15 @@ clarifai/cli/__main__.py,sha256=7nPbLW7Jr2shkgMPvnxpn4xYGMvIcnqluJ69t9w4H_k,74
 clarifai/cli/base.py,sha256=mzfAHRhon6tKntpxk241GD-Sjrb2-V99nAOasElLuuw,8254
 clarifai/cli/compute_cluster.py,sha256=8Xss0Obrp6l1XuxJe0luOqU_pf8vXGDRi6jyIe8qR6k,2282
 clarifai/cli/deployment.py,sha256=9C4I6_kyMxRkWl6h681wc79-3mAtDHtTUaxRv05OZMs,4262
-clarifai/cli/model.py,sha256=71u7JS0xnxQOLp3s9cKGbT1hyhf1CIVxMeFMU2fiLTM,31553
+clarifai/cli/model.py,sha256=23h5tTzz2secprPFdYpI48HWQB3gtv5Dvl0WSLyZgaE,31711
 clarifai/cli/nodepool.py,sha256=H6OIdUW_EiyDUwZogzEDoYmVwEjLMsgoDlPyE7gjIuU,4245
-clarifai/cli/pipeline.py,sha256=M8gW72IB5hx1OIf0pOytZhnw50euPnXxW4vuZ9Fqyqw,5477
+clarifai/cli/pipeline.py,sha256=smWPCK9kLCqnjTCb3w8BAeiAcowY20Bdxfk-OCzCi0I,10601
 clarifai/cli/pipeline_step.py,sha256=eOxU4MdPBuB01A00Rz6m9THLyTaTLOTKwGwSVyegkyI,3808
 clarifai/cli/templates/__init__.py,sha256=HbMlZuYOMyVJde73ijNAevmSRUpIttGlHdwyO4W-JOs,44
 clarifai/cli/templates/model_templates.py,sha256=_ZonIBnY9KKSJY31KZbUys_uN_k_Txu7Dip12KWfmSU,9633
 clarifai/cli/templates/pipeline_step_templates.py,sha256=HU1BoU7wG71MviQAvyecxT_qo70XhTtPGYtoIQ-U-l0,1663
 clarifai/cli/templates/pipeline_templates.py,sha256=mfHrEoRxICIv00zxfgIct2IpxcMmZ6zjHG8WLF1TPcI,4409
-clarifai/client/__init__.py,sha256=NhpNFRJY6mTi8ca-5hUeTEmYeDKHDNXY48FN63pDuos,703
+clarifai/client/__init__.py,sha256=KXvZFE9TCJf1k_GNUHCZ4icsUlKr1lz0cnBR91LuY8M,765
 clarifai/client/app.py,sha256=1M9XDsPWIEsj0g-mgIeZ9Mvkt85UHSbrv6pEr-QKfNg,41423
 clarifai/client/base.py,sha256=rXQlB0BXbKLOgLVJW_2axYh0Vd_F0BbgJE_DXTQoG4U,9109
 clarifai/client/compute_cluster.py,sha256=ViPyh-FibXL1J0ypsVOTaQnR1ymKohmZEuA13RwA-hc,10254
@@ -28,6 +28,7 @@ clarifai/client/model.py,sha256=8D_L6nuL4_hBAKgwseYhoAeKS9u3ky0zczkcJghxFe8,9007
 clarifai/client/model_client.py,sha256=4gIS0mKBdiNMA1x_6Wo6H7WbfLsmQix64EpONcQjQV4,37129
 clarifai/client/module.py,sha256=jLViQYvVV3FmRN_ivvbk83uwsx7CgYGeEx4dYAr6yD4,4537
 clarifai/client/nodepool.py,sha256=Y5zQ0JLdTjAp2TmVnx7AAOwaB2YUslk3nl7s6BQ90FQ,16415
+clarifai/client/pipeline.py,sha256=Hy3qnSX1pcoi-OAtdzr-qxkRYi1CxsaUzsfS3GDtETM,14358
 clarifai/client/runner.py,sha256=5xCiqByGGscfNm0IjHelhDTx8-9l8G0C3HL-3YZogK8,2253
 clarifai/client/search.py,sha256=3LLfATrdU43a0mRNITmJV-E53bhfafZkYsbwkTtlnyU,15661
 clarifai/client/user.py,sha256=YDAXSOh7ACsvCjVctugiTu8MXFN_TDBoXuEKGXv_uHg,21997
@@ -70,12 +71,12 @@ clarifai/rag/__init__.py,sha256=wu3PzAzo7uqgrEzuaC9lY_3gj1HFiR3GU3elZIKTT5g,40
 clarifai/rag/rag.py,sha256=EG3GoFrHFCmA70Tz49_0Jo1-3WIaHSgWGHecPeErcdc,14170
 clarifai/rag/utils.py,sha256=_gVZdABuMnraCKViLruV75x0F3IpgFXN6amYSGE5_xc,4462
 clarifai/runners/__init__.py,sha256=wXLaSljH7qLeJCrZdKEnlQh2tNqTQAIZKWOu2rZ6wGs,279
-clarifai/runners/server.py,sha256=9qVAs8pRHmtyY0RCNIQ1uP8nqDADIFZ03LnkoDt1h4U,4692
+clarifai/runners/server.py,sha256=YNK2ZjVsAzpULJ6cBAJotrL3xSJ_Gx3VodbBY_Fa7kQ,4732
 clarifai/runners/dockerfile_template/Dockerfile.template,sha256=DUH7F0-uLOV0LTjnPde-9chSzscAAxBAwjTxi9b_l9g,2425
 clarifai/runners/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/runners/models/dummy_openai_model.py,sha256=pcmAVbqTTGG4J3BLVjKfvM_SQ-GET_XexIUdLcr9Zvo,8373
 clarifai/runners/models/mcp_class.py,sha256=RdKn7rW4vYol0VRDZiLTSMfkqjLhO1ijXAQ0Rq0Jfnw,6647
-clarifai/runners/models/model_builder.py,sha256=lzXf1H4oQ4l672xHoXSaT4bi0z0vTj3YnziT7RRRLIg,63695
+clarifai/runners/models/model_builder.py,sha256=JAf6nPuL5Esus5g3RL_Cq1-9-ZkgDhe2DXtcvDHuOuY,64701
 clarifai/runners/models/model_class.py,sha256=Ndh437BNMkpFBo6B108GuKL8sGYaGnSplZ6FxOgd_v8,20010
 clarifai/runners/models/model_run_locally.py,sha256=6-6WjEKc0ba3gAv4wOLdMs2XOzS3b-2bZHJS0wdVqJY,20088
 clarifai/runners/models/model_runner.py,sha256=tZTX1XKMlniJEmd1WMjcwGfej5NCWqv23HZ4xrG8YV8,9153
@@ -118,9 +119,9 @@ clarifai/workflows/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuF
 clarifai/workflows/export.py,sha256=HvUYG9N_-UZoRR0-_tdGbZ950_AeBqawSppgUxQebR0,1913
 clarifai/workflows/utils.py,sha256=ESL3INcouNcLKCh-nMpfXX-YbtCzX7tz7hT57_RGQ3M,2079
 clarifai/workflows/validate.py,sha256=UhmukyHkfxiMFrPPeBdUTiCOHQT5-shqivlBYEyKTlU,2931
-clarifai-11.5.6.dist-info/licenses/LICENSE,sha256=mUqF_d12-qE2n41g7C5_sq-BMLOcj6CNN-jevr15YHU,555
-clarifai-11.5.6.dist-info/METADATA,sha256=dE9Pb314d6-aAeFuNetMEyt6GsbveEuJdqnjTbeln7E,22737
-clarifai-11.5.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-clarifai-11.5.6.dist-info/entry_points.txt,sha256=X9FZ4Z-i_r2Ud1RpZ9sNIFYuu_-9fogzCMCRUD9hyX0,51
-clarifai-11.5.6.dist-info/top_level.txt,sha256=wUMdCQGjkxaynZ6nZ9FAnvBUCgp5RJUVFSy2j-KYo0s,9
-clarifai-11.5.6.dist-info/RECORD,,
+clarifai-11.6.0.dist-info/licenses/LICENSE,sha256=mUqF_d12-qE2n41g7C5_sq-BMLOcj6CNN-jevr15YHU,555
+clarifai-11.6.0.dist-info/METADATA,sha256=PPsStGThhQJq7knvZFbIoQL0JfEACgAPaUrWAH1I8CQ,22737
+clarifai-11.6.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+clarifai-11.6.0.dist-info/entry_points.txt,sha256=X9FZ4Z-i_r2Ud1RpZ9sNIFYuu_-9fogzCMCRUD9hyX0,51
+clarifai-11.6.0.dist-info/top_level.txt,sha256=wUMdCQGjkxaynZ6nZ9FAnvBUCgp5RJUVFSy2j-KYo0s,9
+clarifai-11.6.0.dist-info/RECORD,,