clarifai 10.10.0__py3-none-any.whl → 10.10.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clarifai/__init__.py +1 -1
- clarifai/client/dataset.py +21 -0
- clarifai/client/deployment.py +2 -1
- clarifai/client/input.py +27 -0
- clarifai/datasets/upload/multimodal.py +0 -2
- clarifai/runners/dockerfile_template/{Dockerfile.cpu.template → Dockerfile.template} +21 -11
- clarifai/runners/models/model_upload.py +100 -12
- {clarifai-10.10.0.dist-info → clarifai-10.10.1.dist-info}/METADATA +1 -1
- {clarifai-10.10.0.dist-info → clarifai-10.10.1.dist-info}/RECORD +13 -14
- {clarifai-10.10.0.dist-info → clarifai-10.10.1.dist-info}/WHEEL +1 -1
- clarifai/runners/dockerfile_template/Dockerfile.cuda.template +0 -83
- {clarifai-10.10.0.dist-info → clarifai-10.10.1.dist-info}/LICENSE +0 -0
- {clarifai-10.10.0.dist-info → clarifai-10.10.1.dist-info}/entry_points.txt +0 -0
- {clarifai-10.10.0.dist-info → clarifai-10.10.1.dist-info}/top_level.txt +0 -0
clarifai/__init__.py
CHANGED
@@ -1 +1 @@
-__version__ = "10.10.0"
+__version__ = "10.10.1"
clarifai/client/dataset.py
CHANGED
@@ -9,6 +9,7 @@ from typing import Dict, Generator, List, Optional, Tuple, Type, TypeVar, Union

 import requests
 from clarifai_grpc.grpc.api import resources_pb2, service_pb2
+from clarifai_grpc.grpc.api.resources_pb2 import Input
 from clarifai_grpc.grpc.api.service_pb2 import MultiInputResponse
 from clarifai_grpc.grpc.api.status import status_code_pb2, status_pb2
 from google.protobuf.json_format import MessageToDict
@@ -190,6 +191,26 @@ class Dataset(Lister, BaseClient):
       }
       yield Dataset.from_auth_helper(self.auth_helper, **kwargs)

+  def list_inputs(self, page_no: int = None, per_page: int = None,
+                  input_type: str = None) -> Generator[Input, None, None]:
+    """Lists all the inputs for the dataset.
+
+    Args:
+        page_no (int): The page number to list.
+        per_page (int): The number of items per page.
+        input_type (str): The type of input to list. Options: 'image', 'video', 'audio', 'text'.
+
+    Yields:
+        Input: Input objects in the dataset.
+
+    Example:
+        >>> from clarifai.client.dataset import Dataset
+        >>> dataset = Dataset(dataset_id='dataset_id', user_id='user_id', app_id='app_id')
+        >>> all_dataset_inputs = list(dataset.list_inputs())
+    """
+    return self.input_object.list_inputs(
+        dataset_id=self.id, page_no=page_no, per_page=per_page, input_type=input_type)
+
   def __iter__(self):
     return iter(DatasetExportReader(archive_url=self.archive_zip()))

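For orientation, a minimal usage sketch of the new `Dataset.list_inputs`, which simply delegates to the app-level `Inputs.list_inputs` with the dataset's own id. `CLARIFAI_PAT` is assumed to be set in the environment and the ids are placeholders:

```python
# Hypothetical usage of Dataset.list_inputs added in 10.10.1.
# Assumes CLARIFAI_PAT is exported; all ids below are placeholders.
from clarifai.client.dataset import Dataset

dataset = Dataset(dataset_id="dataset_id", user_id="user_id", app_id="app_id")

# The method returns a generator of Input protos, so it can be consumed lazily.
for inp in dataset.list_inputs(input_type="image", per_page=16):
    print(inp.id)
```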
clarifai/client/deployment.py
CHANGED
@@ -49,7 +49,8 @@ class Deployment(Lister, BaseClient):
     Returns:
         resources_pb2.RunnerSelector: A RunnerSelector object for the given deployment_id.
     """
-    return resources_pb2.RunnerSelector(
+    return resources_pb2.RunnerSelector(
+        deployment=resources_pb2.Deployment(id=deployment_id, user_id=user_id))

   def __getattr__(self, name):
     return getattr(self.deployment_info, name)
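The patched method (per its docstring, it builds a RunnerSelector for a given deployment_id) now nests a `Deployment` message carrying both the deployment id and the owning user id. A minimal sketch of the proto it returns, with placeholder ids:

```python
# Sketch of the RunnerSelector the patched method now constructs.
# Field names are taken from the diff above; the ids are placeholders.
from clarifai_grpc.grpc.api import resources_pb2

runner_selector = resources_pb2.RunnerSelector(
    deployment=resources_pb2.Deployment(id="deployment_id", user_id="user_id"))
```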
clarifai/client/input.py
CHANGED
@@ -867,6 +867,33 @@ class Inputs(Lister, BaseClient):
       raise Exception(response.status)
     self.logger.info("\nInputs Deleted\n%s", response.status)

+  def delete_annotations(self, input_ids: List[str], annotation_ids: List[str] = []) -> None:
+    """Delete list of annotations of input objects from the app.
+
+    Args:
+        input_ids (Input): List of input objects for which annotations to delete.
+        annotation_ids (List[str]): List of annotation ids to delete.
+
+    Example:
+        >>> from clarifai.client.user import User
+        >>> input_obj = User(user_id="user_id").app(app_id="app_id").inputs()
+        >>> input_obj.delete_annotations(input_ids=['input_id_1', 'input_id_2'])
+
+    Note:
+        'annotation_ids' are optional but if the are provided, the number and order in
+        'annotation_ids' and 'input_ids' should match
+    """
+    if not isinstance(input_ids, list):
+      raise UserError("input_ids must be a list of input ids")
+    if annotation_ids and len(input_ids) != len(annotation_ids):
+      raise UserError("Number of provided annotation_ids and input_ids should match.")
+    request = service_pb2.DeleteAnnotationsRequest(
+        user_app_id=self.user_app_id, ids=annotation_ids, input_ids=input_ids)
+    response = self._grpc_request(self.STUB.DeleteAnnotations, request)
+    if response.status.code != status_code_pb2.SUCCESS:
+      raise Exception(response.status)
+    self.logger.info("\nAnnotations Deleted\n%s", response.status)
+
   def download_inputs(self, inputs: List[Input]) -> List[bytes]:
     """Download list of input objects from the app.

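The docstring's Note means that `annotation_ids`, when supplied, must line up index-for-index with `input_ids`. A hedged sketch of that targeted form, with placeholder ids and `CLARIFAI_PAT` assumed in the environment:

```python
# Targeted delete: annotation_ids[i] is removed from input_ids[i].
# Ids are placeholders; CLARIFAI_PAT is assumed to be set.
from clarifai.client.user import User

input_obj = User(user_id="user_id").app(app_id="app_id").inputs()
input_obj.delete_annotations(
    input_ids=["input_id_1", "input_id_2"],
    annotation_ids=["annotation_id_1", "annotation_id_2"])
```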
clarifai/datasets/upload/multimodal.py
CHANGED
@@ -6,7 +6,6 @@ from google.protobuf.struct_pb2 import Struct

 from clarifai.client.input import Inputs
 from clarifai.datasets.upload.base import ClarifaiDataLoader, ClarifaiDataset
-from clarifai.utils.misc import get_uuid


 class MultiModalDataset(ClarifaiDataset):
@@ -36,7 +35,6 @@ class MultiModalDataset(ClarifaiDataset):
       image_bytes = data_item.image_bytes
       text = data_item.text
       labels = data_item.labels if isinstance(data_item.labels, list) else [data_item.labels]
-      id = get_uuid(8)
       input_id = f"{self.dataset_id}-{id}" if data_item.id is None else f"{self.dataset_id}-{str(data_item.id)}"
       if data_item.metadata is not None:
         metadata.update(data_item.metadata)
clarifai/runners/dockerfile_template/{Dockerfile.cpu.template → Dockerfile.template}
CHANGED
@@ -1,16 +1,16 @@
-
-FROM public.ecr.aws/docker/library/python:${PYTHON_VERSION}-slim-bookworm as build
+FROM --platform=$TARGETPLATFORM ${BASE_IMAGE} as build

-
-WORKDIR /app
+ENV DEBIAN_FRONTEND=noninteractive

+#############################
+# User specific requirements
+#############################
 COPY requirements.txt .
-# Install requirements and cleanup before leaving this line.
-# Note(zeiler): this could be in a future template as {{model_python_deps}}
-RUN python -m pip install -r requirements.txt && rm -rf /root/.cache

-# Install
-
+# Install requirements and clarifai package and cleanup before leaving this line.
+# Note(zeiler): this could be in a future template as {{model_python_deps}}
+RUN pip install --no-cache-dir -r requirements.txt && \
+    pip install --no-cache-dir clarifai

 # These will be set by the templaing system.
 ENV CLARIFAI_PAT=${CLARIFAI_PAT}
@@ -20,12 +20,22 @@ ENV CLARIFAI_NODEPOOL_ID=${CLARIFAI_NODEPOOL_ID}
 ENV CLARIFAI_COMPUTE_CLUSTER_ID=${CLARIFAI_COMPUTE_CLUSTER_ID}
 ENV CLARIFAI_API_BASE=${CLARIFAI_API_BASE}

+# Set the NUMBA cache dir to /tmp
+ENV NUMBA_CACHE_DIR=/tmp/numba_cache
+ENV HOME=/tmp
+
+# Set the working directory to /app
+WORKDIR /app
+
 # Copy the current folder into /app/model_dir that the SDK will expect.
+# Note(zeiler): would be nice to exclude checkpoints in case they were pre-downloaded.
 COPY . /app/model_dir/${name}

 # Add the model directory to the python path.
-ENV PYTHONPATH
+ENV PYTHONPATH=${PYTHONPATH}:/app/model_dir/${name}
+
+ENTRYPOINT ["python", "-m", "clarifai.runners.server"]

 # Finally run the clarifai entrypoint to start the runner loop and local dev server.
 # Note(zeiler): we may want to make this a clarifai CLI call.
-CMD ["
+CMD ["--model_path", "/app/model_dir/main"]
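The rewritten template no longer hard-codes a Python base image; `${BASE_IMAGE}` and `${name}` are filled in by `model_upload.py` through `string.Template.safe_substitute` (see the hunk below). A rough sketch of that substitution, with an illustrative base image value:

```python
# Rough sketch of how the Dockerfile placeholders get filled in; the base image
# string here is only an example value.
from string import Template

with open("Dockerfile.template") as f:
    template = Template(f.read())

dockerfile_content = template.safe_substitute(
    name="main",
    BASE_IMAGE="public.ecr.aws/clarifai-models/python-base:3.11",
)
print(dockerfile_content)
```

Because `safe_substitute` leaves unmatched placeholders untouched, runtime variables such as `${CLARIFAI_PAT}` survive templating unchanged in the generated Dockerfile.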
clarifai/runners/models/model_upload.py
CHANGED
@@ -1,4 +1,5 @@
 import os
+import re
 import time
 from string import Template

@@ -23,6 +24,44 @@ def _clear_line(n: int = 1) -> None:

 class ModelUploader:
   DEFAULT_PYTHON_VERSION = 3.11
+  DEFAULT_TORCH_VERSION = '2.4.0'
+  DEFAULT_CUDA_VERSION = '124'
+  # List of available torch images for matrix
+  '''
+  python_version: ['3.8', '3.9', '3.10', '3.11']
+  torch_version: ['2.0.0', '2.1.0', '2.2.0', '2.3.0', '2.4.0', '2.4.1', '2.5.0']
+  cuda_version: ['124']
+  '''
+  AVAILABLE_TORCH_IMAGES = [
+      '2.0.0-py3.8-cuda124',
+      '2.0.0-py3.9-cuda124',
+      '2.0.0-py3.10-cuda124',
+      '2.0.0-py3.11-cuda124',
+      '2.1.0-py3.8-cuda124',
+      '2.1.0-py3.9-cuda124',
+      '2.1.0-py3.10-cuda124',
+      '2.1.0-py3.11-cuda124',
+      '2.2.0-py3.8-cuda124',
+      '2.2.0-py3.9-cuda124',
+      '2.2.0-py3.10-cuda124',
+      '2.2.0-py3.11-cuda124',
+      '2.3.0-py3.8-cuda124',
+      '2.3.0-py3.9-cuda124',
+      '2.3.0-py3.10-cuda124',
+      '2.3.0-py3.11-cuda124',
+      '2.4.0-py3.8-cuda124',
+      '2.4.0-py3.9-cuda124',
+      '2.4.0-py3.10-cuda124',
+      '2.4.0-py3.11-cuda124',
+      '2.4.1-py3.8-cuda124',
+      '2.4.1-py3.9-cuda124',
+      '2.4.1-py3.10-cuda124',
+      '2.4.1-py3.11-cuda124',
+  ]
+  AVAILABLE_PYTHON_IMAGES = ['3.8', '3.9', '3.10', '3.11', '3.12', '3.13']
+  PYTHON_BASE_IMAGE = 'public.ecr.aws/clarifai-models/python-base:{python_version}'
+  TORCH_BASE_IMAGE = 'public.ecr.aws/clarifai-models/torch:{torch_version}-py{python_version}-cuda{cuda_version}'
+
   CONCEPTS_REQUIRED_MODEL_TYPE = [
       'visual-classifier', 'visual-detector', 'visual-segmenter', 'text-classifier'
   ]
@@ -144,18 +183,46 @@ class ModelUploader:
     )
     return self.client.STUB.PostModels(request)

+  def _parse_requirements(self):
+    # parse the user's requirements.txt to determine the proper base image to build on top of, based on the torch and other large dependencies and it's versions
+    # List of dependencies to look for
+    dependencies = [
+        'torch',
+    ]
+    # Escape dependency names for regex
+    dep_pattern = '|'.join(map(re.escape, dependencies))
+    # All possible version specifiers
+    version_specifiers = '==|>=|<=|!=|~=|>|<'
+    # Compile a regex pattern with verbose mode for readability
+    pattern = re.compile(r"""
+        ^\s*                                   # Start of line, optional whitespace
+        (?P<dependency>""" + dep_pattern + r""")  # Dependency name
+        \s*                                    # Optional whitespace
+        (?P<specifier>""" + version_specifiers + r""")?  # Optional version specifier
+        \s*                                    # Optional whitespace
+        (?P<version>[^\s;]+)?                  # Optional version (up to space or semicolon)
+        """, re.VERBOSE)
+
+    deendencies_version = {}
+    with open(os.path.join(self.folder, 'requirements.txt'), 'r') as file:
+      for line in file:
+        # Skip empty lines and comments
+        line = line.strip()
+        if not line or line.startswith('#'):
+          continue
+        match = pattern.match(line)
+        if match:
+          dependency = match.group('dependency')
+          version = match.group('version')
+          deendencies_version[dependency] = version if version else None
+    return deendencies_version
+
   def create_dockerfile(self):
-
-
-
-
-
-          'Dockerfile.cuda.template',
-      )
-    else:
-      dockerfile_template = os.path.join(
-          os.path.dirname(os.path.dirname(__file__)), 'dockerfile_template',
-          'Dockerfile.cpu.template')
+    dockerfile_template = os.path.join(
+        os.path.dirname(os.path.dirname(__file__)),
+        'dockerfile_template',
+        'Dockerfile.template',
+    )

     with open(dockerfile_template, 'r') as template_file:
       dockerfile_template = template_file.read()
@@ -166,6 +233,11 @@ class ModelUploader:
     build_info = self.config.get('build_info', {})
     if 'python_version' in build_info:
       python_version = build_info['python_version']
+      if python_version not in self.AVAILABLE_PYTHON_IMAGES:
+        logger.error(
+            f"Python version {python_version} not supported, please use one of the following versions: {self.AVAILABLE_PYTHON_IMAGES}"
+        )
+        return
       logger.info(
           f"Using Python version {python_version} from the config file to build the Dockerfile")
     else:
@@ -174,10 +246,26 @@
       )
       python_version = self.DEFAULT_PYTHON_VERSION

+    base_image = self.PYTHON_BASE_IMAGE.format(python_version=python_version)
+
+    # Parse the requirements.txt file to determine the base image
+    dependencies = self._parse_requirements()
+    if 'torch' in dependencies and dependencies['torch']:
+      torch_version = dependencies['torch']
+
+      for image in self.AVAILABLE_TORCH_IMAGES:
+        if torch_version in image and f'py{python_version}' in image:
+          base_image = self.TORCH_BASE_IMAGE.format(
+              torch_version=torch_version,
+              python_version=python_version,
+              cuda_version=self.DEFAULT_CUDA_VERSION)
+          logger.info(f"Using Torch version {torch_version} base image to build the Docker image")
+          break
+
     # Replace placeholders with actual values
     dockerfile_content = dockerfile_template.safe_substitute(
-        PYTHON_VERSION=python_version,
         name='main',
+        BASE_IMAGE=base_image,
     )

     # Write Dockerfile
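Taken together, the new logic reduces to: if `requirements.txt` pins torch, pick the matching torch/CUDA base image, otherwise fall back to the plain Python base. A condensed, hedged sketch with the class constants inlined (the image list is abbreviated and the versions shown are just examples):

```python
# Condensed sketch of the base-image selection added in this hunk; not the
# actual ModelUploader code, and the image list below is abbreviated.
import re

PYTHON_BASE_IMAGE = 'public.ecr.aws/clarifai-models/python-base:{python_version}'
TORCH_BASE_IMAGE = 'public.ecr.aws/clarifai-models/torch:{torch_version}-py{python_version}-cuda{cuda_version}'
AVAILABLE_TORCH_IMAGES = ['2.4.0-py3.10-cuda124', '2.4.0-py3.11-cuda124']  # abbreviated


def pick_base_image(requirements_text: str, python_version: str = '3.11',
                    cuda_version: str = '124') -> str:
    """Return the torch base image if requirements.txt pins torch, else the python base."""
    match = re.search(r'^\s*torch\s*(?:==|>=|<=|!=|~=|>|<)?\s*([^\s;#]+)?',
                      requirements_text, re.MULTILINE)
    torch_version = match.group(1) if match else None
    if torch_version:
        for image in AVAILABLE_TORCH_IMAGES:
            if torch_version in image and f'py{python_version}' in image:
                return TORCH_BASE_IMAGE.format(
                    torch_version=torch_version,
                    python_version=python_version,
                    cuda_version=cuda_version)
    return PYTHON_BASE_IMAGE.format(python_version=python_version)


print(pick_base_image("torch==2.4.0\nrequests>=2.0"))
# -> public.ecr.aws/clarifai-models/torch:2.4.0-py3.11-cuda124
```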
{clarifai-10.10.0.dist-info → clarifai-10.10.1.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-clarifai/__init__.py,sha256=
+clarifai/__init__.py,sha256=9zsqePD7LCjMaH7l_9eXWc_FNCxosvR1TP75yQryev4,24
 clarifai/cli.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/errors.py,sha256=RwzTajwds51wLD0MVlMC5kcpBnzRpreDLlazPSBZxrg,2605
 clarifai/versions.py,sha256=jctnczzfGk_S3EnVqb2FjRKfSREkNmvNEwAAa_VoKiQ,222
@@ -13,9 +13,9 @@ clarifai/client/__init__.py,sha256=xI1U0l5AZdRThvQAXCLsd9axxyFzXXJ22m8LHqVjQRU,6
 clarifai/client/app.py,sha256=6pckYme1urV2YJjLIYfeZ-vH0Z5YSQa51jzIMcEfwug,38342
 clarifai/client/base.py,sha256=hSHOqkXbSKyaRDeylMMnkhUHCAHhEqno4KI0CXGziBA,7536
 clarifai/client/compute_cluster.py,sha256=EvW9TJjPvInUlggfg1A98sxoWH8_PY5rCVXZhsj6ac0,8705
-clarifai/client/dataset.py,sha256=
-clarifai/client/deployment.py,sha256=
-clarifai/client/input.py,sha256=
+clarifai/client/dataset.py,sha256=AIzwbYs-ExkmUqW9nuEJgpW8-D7rjA1PtopU5Iu6YZE,32018
+clarifai/client/deployment.py,sha256=w7Y6pA1rYG4KRK1SwusRZc2sQRXlG8wezuVdzSWpCo0,2586
+clarifai/client/input.py,sha256=GvrPV2chThNjimekBIleuIr6AD10_wrfc-1Hm5C4NQ8,45648
 clarifai/client/lister.py,sha256=03KGMvs5RVyYqxLsSrWhNc34I8kiF1Ph0NeyEwu7nMU,2082
 clarifai/client/model.py,sha256=WmLBPm_rDzbPR_Cxo8gnntBnPiWFt3gYKiiKuJ9lH04,84652
 clarifai/client/module.py,sha256=FTkm8s9m-EaTKN7g9MnLhGJ9eETUfKG7aWZ3o1RshYs,4204
@@ -41,7 +41,7 @@ clarifai/datasets/upload/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 clarifai/datasets/upload/base.py,sha256=UIc0ufyIBCrb83_sFpv21L8FshsX4nwsLYQkdlJfzD4,2357
 clarifai/datasets/upload/features.py,sha256=jv2x7jGZKS-LMt87sEZNBwwOskHbP26XTMjoiaSA5pg,2024
 clarifai/datasets/upload/image.py,sha256=HlCsfEMu_C4GVecGSv52RUJ6laLW8H64Pfj_FQyX6qg,8580
-clarifai/datasets/upload/multimodal.py,sha256=
+clarifai/datasets/upload/multimodal.py,sha256=4jBFXgT44tPFHm3O3lYcnKM046qjUNJJaR0oBVTa3HM,2309
 clarifai/datasets/upload/text.py,sha256=boVJenfQZKf79aXu8CEP4g_ANzX5ROdd06g07O7RnXU,2198
 clarifai/datasets/upload/utils.py,sha256=BerWhq40ZUN30z6VImlc93eZtT-1vI18AMgSOuNzJEM,9647
 clarifai/datasets/upload/loaders/README.md,sha256=aNRutSCTzLp2ruIZx74ZkN5AxpzwKOxMa7OzabnKpwg,2980
@@ -62,15 +62,14 @@ clarifai/rag/rag.py,sha256=L10TcV9E0PF1aJ2Nn1z1x6WVoUoGxbKt20lQXg8ksqo,12594
 clarifai/rag/utils.py,sha256=yr1jAcbpws4vFGBqlAwPPE7v1DRba48g8gixLFw8OhQ,4070
 clarifai/runners/__init__.py,sha256=3vr4RVvN1IRy2SxJpyycAAvrUBbH-mXR7pqUmu4w36A,412
 clarifai/runners/server.py,sha256=CVLrv2DjzCvKVXcJ4SWvcFWUZq0bdlBmyEpfVlfgT2A,4902
-clarifai/runners/dockerfile_template/Dockerfile.
-clarifai/runners/dockerfile_template/Dockerfile.cuda.template,sha256=8uQp2sX_bIzgQk84FNlS19PwKH_l0Qi54xE7_NVxUTE,3314
+clarifai/runners/dockerfile_template/Dockerfile.template,sha256=-T38Rscpjot8WVuUTUq1_N0xz_gg653FOHV4XQYGG-U,1453
 clarifai/runners/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/runners/models/base_typed_model.py,sha256=OnAk08Lo2Y1fGiBc6JJ6UvJ8P435cTsikTNYDkStDpI,7790
 clarifai/runners/models/model_class.py,sha256=9JSPAr4U4K7xI0kSl-q0mHB06zknm2OR-8XIgBCto94,1611
 clarifai/runners/models/model_run_locally.py,sha256=xbNcD0TMRlk52cUjJH-qenlkeiwS4YcOeb8eYy7KAEI,6583
 clarifai/runners/models/model_runner.py,sha256=3vzoastQxkGRDK8T9aojDsLNBb9A3IiKm6YmbFrE9S0,6241
 clarifai/runners/models/model_servicer.py,sha256=X4715PVA5PBurRTYcwSEudg8fShGV6InAF4mmRlRcHg,2826
-clarifai/runners/models/model_upload.py,sha256=
+clarifai/runners/models/model_upload.py,sha256=ocd6vnm9Pms9AMwi7j5yTjIHQY6LS9yopTTX6uocJvE,20434
 clarifai/runners/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/runners/utils/data_handler.py,sha256=sxy9zlAgI6ETuxCQhUgEXAn2GCsaW1GxpK6GTaMne0g,6966
 clarifai/runners/utils/data_utils.py,sha256=R1iQ82TuQ9JwxCJk8yEB1Lyb0BYVhVbWJI9YDi1zGOs,318
@@ -92,9 +91,9 @@ clarifai/workflows/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuF
 clarifai/workflows/export.py,sha256=vICRhIreqDSShxLKjHNM2JwzKsf1B4fdXB0ciMcA70k,1945
 clarifai/workflows/utils.py,sha256=nGeB_yjVgUO9kOeKTg4OBBaBz-AwXI3m-huSVj-9W18,1924
 clarifai/workflows/validate.py,sha256=yJq03MaJqi5AK3alKGJJBR89xmmjAQ31sVufJUiOqY8,2556
-clarifai-10.10.
-clarifai-10.10.
-clarifai-10.10.
-clarifai-10.10.
-clarifai-10.10.
-clarifai-10.10.
+clarifai-10.10.1.dist-info/LICENSE,sha256=mUqF_d12-qE2n41g7C5_sq-BMLOcj6CNN-jevr15YHU,555
+clarifai-10.10.1.dist-info/METADATA,sha256=01eG2EIX_sgN8tgNkHnAD782mje2gLrspqrAFUEmSic,19566
+clarifai-10.10.1.dist-info/WHEEL,sha256=R06PA3UVYHThwHvxuRWMqaGcr-PuniXahwjmQRFMEkY,91
+clarifai-10.10.1.dist-info/entry_points.txt,sha256=X9FZ4Z-i_r2Ud1RpZ9sNIFYuu_-9fogzCMCRUD9hyX0,51
+clarifai-10.10.1.dist-info/top_level.txt,sha256=wUMdCQGjkxaynZ6nZ9FAnvBUCgp5RJUVFSy2j-KYo0s,9
+clarifai-10.10.1.dist-info/RECORD,,
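Each RECORD line has the form `path,sha256=<urlsafe-base64 digest with padding stripped>,<size in bytes>`, so the entries above change exactly for the files touched by this release. A small sketch for reproducing one entry from an unpacked wheel (the path is just the example used in the first hunk):

```python
# Recompute a wheel RECORD entry: urlsafe base64 of the sha256 digest, no
# padding, followed by the file size in bytes. Assumes the wheel is unpacked
# into the current directory.
import base64
import hashlib
import os

path = "clarifai/__init__.py"
with open(path, "rb") as f:
    data = f.read()
digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
print(f"{path},sha256={digest},{os.path.getsize(path)}")
# Should match the line the new RECORD carries for this file.
```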
clarifai/runners/dockerfile_template/Dockerfile.cuda.template
DELETED
@@ -1,83 +0,0 @@
-# Build a virtualenv containing necessary system libraries and Python packages
-# for users to install their own packages while also being distroless.
-# * Install python3-venv
-# * Install gcc libpython3-dev to compile C Python modules
-# * In the virtualenv: Update pip setuputils and wheel to support building new packages
-# * Export environment variables to use the virtualenv by default
-# * Create a non-root user with minimal privileges and use it
-ARG TARGET_PLATFORM=linux/amd64
-FROM --platform=$TARGET_PLATFORM public.ecr.aws/docker/library/python:${PYTHON_VERSION}-slim-bookworm as build
-
-ENV DEBIAN_FRONTEND=noninteractive
-RUN apt-get update && \
-  apt-get install --no-install-suggests --no-install-recommends --yes \
-  software-properties-common \
-  gcc \
-  libpython3-dev && \
-  python${PYTHON_VERSION} -m venv /venv && \
-  /venv/bin/pip install --disable-pip-version-check --upgrade pip setuptools wheel && \
-  apt-get clean && rm -rf /var/lib/apt/lists/*
-
-# Set environment variables to use virtualenv by default
-ENV VIRTUAL_ENV=/venv
-ENV PATH="$VIRTUAL_ENV/bin:$PATH"
-
-#############################
-# User specific requirements
-#############################
-COPY requirements.txt .
-
-# Install requirements and cleanup before leaving this line.
-# Note(zeiler): this could be in a future template as {{model_python_deps}}
-RUN python -m pip install -r requirements.txt && rm -rf /root/.cache
-
-# Install Clarifai SDK
-RUN python -m pip install clarifai
-
-#############################
-# Finally copy everything we built into a distroless image for runtime.
-######################>#######
-ARG TARGET_PLATFORM=linux/amd64
-FROM --platform=$TARGET_PLATFORM gcr.io/distroless/python3-debian12:latest
-# FROM --platform=$TARGET_PLATFORM gcr.io/distroless/python3-debian12:debug
-ARG PYTHON_VERSION=${PYTHON_VERSION}
-# needed to call pip directly
-COPY --from=build /bin/sh /bin/sh
-
-# virtual env
-COPY --from=build /venv /venv
-
-# We have to overwrite the python3 binary that the distroless image uses
-COPY --from=build /usr/local/bin/python${PYTHON_VERSION} /usr/bin/python3
-# And also copy in all the lib files for it.
-COPY --from=build /usr/local/lib/ /usr/lib/
-
-# Set environment variables to use virtualenv by default
-ENV VIRTUAL_ENV=/venv
-ENV PYTHONPATH=${PYTHONPATH}:${VIRTUAL_ENV}/lib/python${PYTHON_VERSION}/site-packages
-
-# These will be set by the templaing system.
-ENV CLARIFAI_PAT=${CLARIFAI_PAT}
-ENV CLARIFAI_USER_ID=${CLARIFAI_USER_ID}
-ENV CLARIFAI_RUNNER_ID=${CLARIFAI_RUNNER_ID}
-ENV CLARIFAI_NODEPOOL_ID=${CLARIFAI_NODEPOOL_ID}
-ENV CLARIFAI_COMPUTE_CLUSTER_ID=${CLARIFAI_COMPUTE_CLUSTER_ID}
-ENV CLARIFAI_API_BASE=${CLARIFAI_API_BASE}
-
-# Set the NUMBA cache dir to /tmp
-ENV NUMBA_CACHE_DIR=/tmp/numba_cache
-ENV HOME=/tmp
-
-# Set the working directory to /app
-WORKDIR /app
-
-# Copy the current folder into /app/model_dir that the SDK will expect.
-# Note(zeiler): would be nice to exclude checkpoints in case they were pre-downloaded.
-COPY . /app/model_dir/${name}
-
-# Add the model directory to the python path.
-ENV PYTHONPATH=${PYTHONPATH}:/app/model_dir/${name}
-
-# Finally run the clarifai entrypoint to start the runner loop and local dev server.
-# Note(zeiler): we may want to make this a clarifai CLI call.
-CMD ["-m", "clarifai.runners.server", "--model_path", "/app/model_dir/${name}"]
{clarifai-10.10.0.dist-info → clarifai-10.10.1.dist-info}/LICENSE
File without changes
{clarifai-10.10.0.dist-info → clarifai-10.10.1.dist-info}/entry_points.txt
File without changes
{clarifai-10.10.0.dist-info → clarifai-10.10.1.dist-info}/top_level.txt
File without changes
|