clarifai 11.1.1-py3-none-any.whl → 11.1.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clarifai/__init__.py +1 -1
- clarifai/runners/dockerfile_template/Dockerfile.template +8 -8
- clarifai/runners/models/model_builder.py +90 -63
- clarifai/runners/models/model_run_locally.py +1 -1
- clarifai/runners/utils/const.py +4 -6
- clarifai/runners/utils/loader.py +12 -0
- clarifai/utils/logging.py +5 -1
- {clarifai-11.1.1.dist-info → clarifai-11.1.3.dist-info}/METADATA +4 -4
- {clarifai-11.1.1.dist-info → clarifai-11.1.3.dist-info}/RECORD +13 -13
- {clarifai-11.1.1.dist-info → clarifai-11.1.3.dist-info}/LICENSE +0 -0
- {clarifai-11.1.1.dist-info → clarifai-11.1.3.dist-info}/WHEEL +0 -0
- {clarifai-11.1.1.dist-info → clarifai-11.1.3.dist-info}/entry_points.txt +0 -0
- {clarifai-11.1.1.dist-info → clarifai-11.1.3.dist-info}/top_level.txt +0 -0
clarifai/__init__.py
CHANGED
@@ -1 +1 @@
-__version__ = "11.1.1"
+__version__ = "11.1.3"
clarifai/runners/dockerfile_template/Dockerfile.template
CHANGED
@@ -1,14 +1,14 @@
-# syntax=docker/dockerfile:1
+# syntax=docker/dockerfile:1.13-labs
 #############################
 # User specific requirements installed in the pip_packages
 #############################
-FROM --platform=$TARGETPLATFORM ${
+FROM --platform=$TARGETPLATFORM ${FINAL_IMAGE} as pip_packages
 
 COPY --link requirements.txt /home/nonroot/requirements.txt
 
 # Update clarifai package so we always have latest protocol to the API. Everything should land in /venv
-RUN pip install --no-cache-dir -r /home/nonroot/requirements.txt && \
-
+RUN ["pip", "install", "--no-cache-dir", "-r", "/home/nonroot/requirements.txt"]
+RUN ["pip", "show", "clarifai"]
 #############################
 
 #############################
@@ -16,15 +16,15 @@ RUN pip install --no-cache-dir -r /home/nonroot/requirements.txt && \
 #############################
 FROM --platform=$TARGETPLATFORM ${DOWNLOADER_IMAGE} as downloader
 
-# make sure we have the latest clarifai package.
-RUN
+# make sure we have the latest clarifai package. This version is filled in by SDK.
+RUN ["pip", "install", "clarifai==${CLARIFAI_VERSION}"]
 #####
 
 
 #############################
 # Final runtime image
 #############################
-FROM --platform=$TARGETPLATFORM ${
+FROM --platform=$TARGETPLATFORM ${FINAL_IMAGE} as final
 
 # Set the NUMBA cache dir to /tmp
 # Set the TORCHINDUCTOR cache dir to /tmp
@@ -54,7 +54,7 @@ RUN ["python", "-m", "clarifai.cli", "model", "download-checkpoints", "--model_p
 
 
 #####
-# Copy the python packages from the
+# Copy the python packages from the builder stage.
 COPY --link=true --from=pip_packages /venv /venv
 #####
 
clarifai/runners/models/model_builder.py
CHANGED
@@ -16,12 +16,20 @@ from rich.markup import escape
 
 from clarifai.client import BaseClient
 from clarifai.runners.models.model_class import ModelClass
-from clarifai.runners.utils.const import (
-
-
+from clarifai.runners.utils.const import (AVAILABLE_PYTHON_IMAGES, AVAILABLE_TORCH_IMAGES,
+                                           CONCEPTS_REQUIRED_MODEL_TYPE, DEFAULT_PYTHON_VERSION,
+                                           PYTHON_BASE_IMAGE, TORCH_BASE_IMAGE)
 from clarifai.runners.utils.loader import HuggingFaceLoader
 from clarifai.urls.helper import ClarifaiUrlHelper
 from clarifai.utils.logging import logger
+from clarifai.versions import CLIENT_VERSION
+
+# parse the user's requirements.txt to determine the proper base image to build on top of, based on the torch and other large dependencies and it's versions
+# List of dependencies to look for
+dependencies = [
+    'torch',
+    'clarifai',
+]
 
 
 def _clear_line(n: int = 1) -> None:
@@ -289,43 +297,42 @@ class ModelBuilder:
     )
     return self.client.STUB.PostModels(request)
 
+  def _match_req_line(self, line):
+    line = line.strip()
+    if not line or line.startswith('#'):
+      return None, None
+    # split on whitespace followed by #
+    line = re.split(r'\s+#', line)[0]
+    if "==" in line:
+      pkg, version = line.split("==")
+    elif ">=" in line:
+      pkg, version = line.split(">=")
+    elif ">" in line:
+      pkg, version = line.split(">")
+    elif "<=" in line:
+      pkg, version = line.split("<=")
+    elif "<" in line:
+      pkg, version = line.split("<")
+    else:
+      pkg, version = line, None  # No version specified
+    for dep in dependencies:
+      if dep == pkg:
+        if dep == 'torch' and line.find(
+            'whl/cpu') > 0:  # Ignore torch-cpu whl files, use base mage.
+          return None, None
+        return dep.strip(), version.strip() if version else None
+    return None, None
+
   def _parse_requirements(self):
-
-    # List of dependencies to look for
-    dependencies = [
-        'torch',
-    ]
-    # Escape dependency names for regex
-    dep_pattern = '|'.join(map(re.escape, dependencies))
-    # All possible version specifiers
-    version_specifiers = '==|>=|<=|!=|~=|>|<'
-    # Compile a regex pattern with verbose mode for readability
-    pattern = re.compile(r"""
-        ^\s*  # Start of line, optional whitespace
-        (?P<dependency>""" + dep_pattern + r""")  # Dependency name
-        \s*  # Optional whitespace
-        (?P<specifier>""" + version_specifiers + r""")?  # Optional version specifier
-        \s*  # Optional whitespace
-        (?P<version>[^\s;]+)?  # Optional version (up to space or semicolon)
-    """, re.VERBOSE)
-
-    deendencies_version = {}
+    dependencies_version = {}
     with open(os.path.join(self.folder, 'requirements.txt'), 'r') as file:
       for line in file:
         # Skip empty lines and comments
-
-        if
+        dependency, version = self._match_req_line(line)
+        if dependency is None:
           continue
-
-
-        dependency = match.group('dependency')
-        version = match.group('version')
-        if dependency == "torch" and line.find(
-            'whl/cpu') > 0:  # Ignore torch-cpu whl files, use base mage.
-          continue
-
-        deendencies_version[dependency] = version if version else None
-    return deendencies_version
+        dependencies_version[dependency] = version if version else None
+    return dependencies_version
 
   def create_dockerfile(self):
     dockerfile_template = os.path.join(
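Note: a minimal standalone sketch of the split-on-specifier idea used by the new _match_req_line (illustrative only; TRACKED and match_req_line are not the SDK's names):

    # Standalone sketch of the new requirements parsing; mirrors the split-on-specifier
    # approach in the diff above, not the SDK's exact code.
    import re

    TRACKED = ['torch', 'clarifai']

    def match_req_line(line):
      line = line.strip()
      if not line or line.startswith('#'):
        return None, None
      line = re.split(r'\s+#', line)[0]  # drop trailing comments
      for spec in ('==', '>=', '<=', '>', '<'):
        if spec in line:
          pkg, version = line.split(spec, 1)
          break
      else:
        pkg, version = line, None
      pkg = pkg.strip()
      if pkg in TRACKED:
        if pkg == 'torch' and 'whl/cpu' in line:  # CPU-only torch wheels keep the plain base image
          return None, None
        return pkg, version.strip() if version else None
      return None, None

    print(match_req_line('torch==2.5.1'))         # ('torch', '2.5.1')
    print(match_req_line('clarifai'))             # ('clarifai', None)
    print(match_req_line('numpy>=1.26  # math'))  # (None, None) - not a tracked dependency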
@@ -357,9 +364,8 @@ class ModelBuilder:
       python_version = DEFAULT_PYTHON_VERSION
 
     # This is always the final image used for runtime.
-
-
-    downloader_image = PYTHON_BUILDER_IMAGE.format(python_version=python_version)
+    final_image = PYTHON_BASE_IMAGE.format(python_version=python_version)
+    downloader_image = PYTHON_BASE_IMAGE.format(python_version=python_version)
 
     # Parse the requirements.txt file to determine the base image
     dependencies = self._parse_requirements()
@@ -370,23 +376,43 @@ class ModelBuilder:
       for image in sorted(AVAILABLE_TORCH_IMAGES, reverse=True):
         if torch_version in image and f'py{python_version}' in image:
           cuda_version = image.split('-')[-1].replace('cuda', '')
-
+          final_image = TORCH_BASE_IMAGE.format(
               torch_version=torch_version,
               python_version=python_version,
               cuda_version=cuda_version,
           )
-          # download_image = base_image
           logger.info(f"Using Torch version {torch_version} base image to build the Docker image")
           break
-
-
-
+
+    if 'clarifai' not in dependencies:
+      raise Exception(
+          f"clarifai not found in requirements.txt, please add clarifai to the requirements.txt file with a fixed version. Current version is clarifai=={CLIENT_VERSION}"
+      )
+    clarifai_version = dependencies['clarifai']
+    if not clarifai_version:
+      logger.warn(
+          f"clarifai version not found in requirements.txt, using the latest version {CLIENT_VERSION}"
+      )
+      clarifai_version = CLIENT_VERSION
+    lines = []
+    with open(os.path.join(self.folder, 'requirements.txt'), 'r') as file:
+      for line in file:
+        # if the line without whitespace is "clarifai"
+        dependency, version = self._match_req_line(line)
+        if dependency and dependency == "clarifai":
+          lines.append(line.replace("clarifai", f"clarifai=={CLIENT_VERSION}"))
+        else:
+          lines.append(line)
+    with open(os.path.join(self.folder, 'requirements.txt'), 'w') as file:
+      file.writelines(lines)
+    logger.warn(f"Updated requirements.txt to have clarifai=={CLIENT_VERSION}")
 
     # Replace placeholders with actual values
     dockerfile_content = dockerfile_template.safe_substitute(
         name='main',
-
-        RUNTIME_IMAGE=runtime_image,  # for runtime
+        FINAL_IMAGE=final_image,  # for pip requirements
         DOWNLOADER_IMAGE=downloader_image,  # for downloading checkpoints
+        CLARIFAI_VERSION=clarifai_version,  # for clarifai
     )
 
     # Write Dockerfile
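Note: the placeholders above are filled with Python's string.Template.safe_substitute, which leaves unknown $ markers (such as Docker's own $TARGETPLATFORM) untouched. A small sketch with an abbreviated template and image tags built the way const.py (below) formats them:

    # Minimal sketch of the placeholder substitution step; the template text here is
    # abbreviated for illustration, not the full shipped Dockerfile.template.
    from string import Template

    template_text = """\
    FROM --platform=$TARGETPLATFORM ${FINAL_IMAGE} as pip_packages
    FROM --platform=$TARGETPLATFORM ${DOWNLOADER_IMAGE} as downloader
    RUN ["pip", "install", "clarifai==${CLARIFAI_VERSION}"]
    FROM --platform=$TARGETPLATFORM ${FINAL_IMAGE} as final
    """

    dockerfile_content = Template(template_text).safe_substitute(
        FINAL_IMAGE='public.ecr.aws/clarifai-models/torch:2.5.1-py3.12-cuda124-df565436eea93efb3e8d1eb558a0a46df29523ec',
        DOWNLOADER_IMAGE='public.ecr.aws/clarifai-models/python-base:3.12-df565436eea93efb3e8d1eb558a0a46df29523ec',
        CLARIFAI_VERSION='11.1.3',
    )
    # safe_substitute leaves $TARGETPLATFORM alone because no TARGETPLATFORM key is supplied,
    # so Docker's own build-time variable survives into the generated Dockerfile.
    print(dockerfile_content)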
@@ -477,7 +503,8 @@ class ModelBuilder:
       for concept in labels:
         concept_proto = json_format.ParseDict(concept, resources_pb2.Concept())
         model_version_proto.output_info.data.concepts.append(concept_proto)
-
+    elif self.config.get("checkpoints") and HuggingFaceLoader.validate_concept(
+        self.checkpoint_path):
       labels = HuggingFaceLoader.fetch_labels(self.checkpoint_path)
       logger.info(f"Found {len(labels)} concepts from the model checkpoints.")
       # sort the concepts by id and then update the config file
@@ -495,7 +522,7 @@ class ModelBuilder:
 
   def upload_model_version(self, download_checkpoints):
     file_path = f"{self.folder}.tar.gz"
-    logger.
+    logger.debug(f"Will tar it into file: {file_path}")
 
     model_type_id = self.config.get('model').get('model_type_id')
 
@@ -536,10 +563,10 @@ class ModelBuilder:
 
     with tarfile.open(self.tar_file, "w:gz") as tar:
       tar.add(self.folder, arcname=".", filter=filter_func)
-    logger.
+    logger.debug("Tarring complete, about to start upload.")
 
     file_size = os.path.getsize(self.tar_file)
-    logger.
+    logger.debug(f"Size of the tar is: {file_size} bytes")
 
     self.storage_request_size = self._get_tar_file_content_size(file_path)
     if not download_checkpoints and self.config.get("checkpoints"):
@@ -572,7 +599,6 @@ class ModelBuilder:
           f"request_id: {response.status.req_id}",
           end='\r',
           flush=True)
-    logger.info("")
     if response.status.code != status_code_pb2.MODEL_BUILDING:
       logger.error(f"Failed to upload model version: {response}")
       return
@@ -583,7 +609,7 @@ class ModelBuilder:
       self.monitor_model_build()
     finally:
       if os.path.exists(self.tar_file):
-        logger.
+        logger.debug(f"Cleaning up upload file: {self.tar_file}")
         os.remove(self.tar_file)
 
   def model_version_stream_upload_iterator(self, model_version_proto, file_path):
def model_version_stream_upload_iterator(self, model_version_proto, file_path):
|
@@ -593,9 +619,9 @@ class ModelBuilder:
|
|
593
619
|
chunk_size = int(127 * 1024 * 1024) # 127MB chunk size
|
594
620
|
num_chunks = (file_size // chunk_size) + 1
|
595
621
|
logger.info("Uploading file...")
|
596
|
-
logger.
|
597
|
-
logger.
|
598
|
-
logger.
|
622
|
+
logger.debug(f"File size: {file_size}")
|
623
|
+
logger.debug(f"Chunk size: {chunk_size}")
|
624
|
+
logger.debug(f"Number of chunks: {num_chunks}")
|
599
625
|
read_so_far = 0
|
600
626
|
for part_id in range(num_chunks):
|
601
627
|
try:
|
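Note: a quick check of the chunk arithmetic above with hypothetical sizes:

    # Hypothetical numbers, just illustrating the formula from the diff: a 300 MB tarball
    # with a 127 MB chunk size needs three parts (the "+ 1" covers the final partial chunk).
    file_size = 300 * 1024 * 1024
    chunk_size = int(127 * 1024 * 1024)
    num_chunks = (file_size // chunk_size) + 1
    print(num_chunks)  # 3
    last_chunk = file_size - (num_chunks - 1) * chunk_size
    print(last_chunk / (1024 * 1024))  # 46.0 MB left for the final part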
@@ -615,12 +641,12 @@ class ModelBuilder:
           break
 
     if read_so_far == file_size:
-      logger.info("
+      logger.info("Upload complete!")
 
   def init_upload_model_version(self, model_version_proto, file_path):
     file_size = os.path.getsize(file_path)
-    logger.
-    logger.
+    logger.debug(f"Uploading model version of model {self.model_proto.id}")
+    logger.debug(f"Using file '{os.path.basename(file_path)}' of size: {file_size} bytes")
     result = service_pb2.PostModelVersionsUploadRequest(
         upload_config=service_pb2.PostModelVersionsUploadConfig(
             user_app_id=self.client.user_app_id,
@@ -655,18 +681,19 @@ class ModelBuilder:
               version_id=self.model_version_id,
           ))
       status_code = resp.model_version.status.code
+      logs = self.get_model_build_logs()
+      for log_entry in logs.log_entries:
+        if log_entry.url not in seen_logs:
+          seen_logs.add(log_entry.url)
+          logger.info(f"{escape(log_entry.message.strip())}")
       if status_code == status_code_pb2.MODEL_BUILDING:
         print(f"Model is building... (elapsed {time.time() - st:.1f}s)", end='\r', flush=True)
 
         # Fetch and display the logs
-        logs = self.get_model_build_logs()
-        for log_entry in logs.log_entries:
-          if log_entry.url not in seen_logs:
-            seen_logs.add(log_entry.url)
-            logger.info(f"{escape(log_entry.message.strip())}")
         time.sleep(1)
       elif status_code == status_code_pb2.MODEL_TRAINED:
-        logger.info(
+        logger.info("Model build complete!")
+        logger.info(f"Build time elapsed {time.time() - st:.1f}s)")
         logger.info(f"Check out the model at {self.model_url} version: {self.model_version_id}")
         return True
       else:
clarifai/runners/models/model_run_locally.py
CHANGED
@@ -475,7 +475,7 @@ def main(model_path,
          keep_env=False,
          keep_image=False):
 
-  if not os.environ
+  if not os.environ.get("CLARIFAI_PAT", None):
     logger.error(
         "CLARIFAI_PAT environment variable is not set! Please set your PAT in the 'CLARIFAI_PAT' environment variable."
     )
clarifai/runners/utils/const.py
CHANGED
@@ -2,9 +2,10 @@ import os
 
 registry = os.environ.get('CLARIFAI_BASE_IMAGE_REGISTRY', 'public.ecr.aws/clarifai-models')
 
-
-
-
+GIT_SHA = "df565436eea93efb3e8d1eb558a0a46df29523ec"
+
+PYTHON_BASE_IMAGE = registry + '/python-base:{python_version}-' + GIT_SHA
+TORCH_BASE_IMAGE = registry + '/torch:{torch_version}-py{python_version}-cuda{cuda_version}-' + GIT_SHA
 
 # List of available python base images
 AVAILABLE_PYTHON_IMAGES = ['3.11', '3.12']
@@ -14,13 +15,10 @@ DEFAULT_PYTHON_VERSION = 3.12
 # List of available torch images
 # Keep sorted by most recent cuda version.
 AVAILABLE_TORCH_IMAGES = [
-    '2.4.0-py3.11-cuda124',
     '2.4.1-py3.11-cuda124',
     '2.5.1-py3.11-cuda124',
-    '2.4.0-py3.12-cuda124',
     '2.4.1-py3.12-cuda124',
     '2.5.1-py3.12-cuda124',
-    # '2.4.0-py3.13-cuda124',
     # '2.4.1-py3.13-cuda124',
     # '2.5.1-py3.13-cuda124',
 ]
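Note: once formatted, these constants resolve to fully pinned image tags, e.g.:

    # Mirrors const.py above; the registry value is the default, not necessarily yours.
    registry = 'public.ecr.aws/clarifai-models'
    GIT_SHA = "df565436eea93efb3e8d1eb558a0a46df29523ec"

    PYTHON_BASE_IMAGE = registry + '/python-base:{python_version}-' + GIT_SHA
    TORCH_BASE_IMAGE = registry + '/torch:{torch_version}-py{python_version}-cuda{cuda_version}-' + GIT_SHA

    print(PYTHON_BASE_IMAGE.format(python_version='3.12'))
    # public.ecr.aws/clarifai-models/python-base:3.12-df565436eea93efb3e8d1eb558a0a46df29523ec
    print(TORCH_BASE_IMAGE.format(torch_version='2.5.1', python_version='3.12', cuda_version='124'))
    # public.ecr.aws/clarifai-models/torch:2.5.1-py3.12-cuda124-df565436eea93efb3e8d1eb558a0a46df29523ec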
clarifai/runners/utils/loader.py
CHANGED
@@ -162,6 +162,18 @@ class HuggingFaceLoader:
     return os.path.exists(checkpoint_path) and os.path.exists(
         os.path.join(checkpoint_path, 'config.json'))
 
+  @staticmethod
+  def validate_concept(checkpoint_path: str):
+    # check if downloaded concept exists in hf model
+    config_path = os.path.join(checkpoint_path, 'config.json')
+    with open(config_path, 'r') as f:
+      config = json.load(f)
+
+    labels = config.get('id2label', None)
+    if labels:
+      return True
+    return False
+
   @staticmethod
   def fetch_labels(checkpoint_path: str):
     # Fetch labels for classification, detection and segmentation models
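Note: in effect a checkpoint directory passes validate_concept only when its config.json carries an id2label mapping. A hedged standalone equivalent (has_id2label is an illustrative name, not the SDK method; the path is hypothetical):

    import json
    import os

    def has_id2label(checkpoint_path: str) -> bool:
      # True only if the downloaded Hugging Face checkpoint ships an id2label mapping
      config_path = os.path.join(checkpoint_path, 'config.json')
      if not os.path.exists(config_path):
        return False
      with open(config_path, 'r') as f:
        config = json.load(f)
      return bool(config.get('id2label'))

    print(has_id2label('./checkpoints'))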
clarifai/utils/logging.py
CHANGED
@@ -142,8 +142,12 @@ def _configure_logger(name: str, logger_level: Union[int, str] = logging.NOTSET)
     logger.addHandler(handler)
   else:
     # Add the new rich handler and formatter
+    try:
+      width, _ = os.get_terminal_size()
+    except OSError:
+      width = 255
     handler = RichHandler(
-        rich_tracebacks=True, log_time_format="%Y-%m-%d %H:%M:%S.%f", console=Console(width=
+        rich_tracebacks=True, log_time_format="%Y-%m-%d %H:%M:%S.%f", console=Console(width=width))
     formatter = logging.Formatter('%(message)s')
     handler.setFormatter(formatter)
     logger.addHandler(handler)
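Note: a self-contained sketch of the new terminal-width fallback using the same rich APIs shown above (the logger name here is arbitrary). In a TTY the real width is used; under cron/CI, where os.get_terminal_size raises OSError, a fixed 255 columns is assumed:

    import logging
    import os

    from rich.console import Console
    from rich.logging import RichHandler

    try:
      width, _ = os.get_terminal_size()
    except OSError:
      width = 255

    handler = RichHandler(
        rich_tracebacks=True, log_time_format="%Y-%m-%d %H:%M:%S.%f", console=Console(width=width))
    handler.setFormatter(logging.Formatter('%(message)s'))

    log = logging.getLogger("demo")
    log.addHandler(handler)
    log.setLevel(logging.INFO)
    log.info("configured with width=%s", width)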
{clarifai-11.1.1.dist-info → clarifai-11.1.3.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: clarifai
-Version: 11.1.1
+Version: 11.1.3
 Summary: Clarifai Python SDK
 Home-page: https://github.com/Clarifai/clarifai-python
 Author: Clarifai
@@ -29,9 +29,9 @@ Requires-Dist: PyYAML>=6.0.1
 Requires-Dist: schema==0.7.5
 Requires-Dist: Pillow>=9.5.0
 Requires-Dist: tabulate>=0.9.0
-Requires-Dist: fsspec
-Requires-Dist: click
-Requires-Dist: requests
+Requires-Dist: fsspec>=2024.6.1
+Requires-Dist: click>=8.1.7
+Requires-Dist: requests>=2.32.3
 Provides-Extra: all
 Requires-Dist: pycocotools==2.0.6; extra == "all"
 Dynamic: author
{clarifai-11.1.1.dist-info → clarifai-11.1.3.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-clarifai/__init__.py,sha256=
+clarifai/__init__.py,sha256=zXnN8vTFtOShwHlre_qBxRyaLcXDT3l9V0CuoZEkV5Y,23
 clarifai/cli.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/errors.py,sha256=RwzTajwds51wLD0MVlMC5kcpBnzRpreDLlazPSBZxrg,2605
 clarifai/versions.py,sha256=jctnczzfGk_S3EnVqb2FjRKfSREkNmvNEwAAa_VoKiQ,222
@@ -63,26 +63,26 @@ clarifai/rag/rag.py,sha256=bqUWnfdf91OYMucEK0_rJXDwg0oKjz5c7eda-9CPXu8,12680
 clarifai/rag/utils.py,sha256=yr1jAcbpws4vFGBqlAwPPE7v1DRba48g8gixLFw8OhQ,4070
 clarifai/runners/__init__.py,sha256=FcTqyCvPn9lJFDsi2eGZ-YL8LgPhJmRAS8K5Wobk03s,411
 clarifai/runners/server.py,sha256=Wp3bUHNudFV3Ew2egU7X6f3TCTbBgV__15Rw2yd9jY0,3649
-clarifai/runners/dockerfile_template/Dockerfile.template,sha256
+clarifai/runners/dockerfile_template/Dockerfile.template,sha256=oY_1aOYxQm9XdrV31PfMHGZIQKqEvTJDhPRTIR8Tr4o,3091
 clarifai/runners/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/runners/models/base_typed_model.py,sha256=0QCWxch8CcyJSKvE1D4PILd2RSnQZHTmx4DXlQQ6dpo,7856
-clarifai/runners/models/model_builder.py,sha256=
+clarifai/runners/models/model_builder.py,sha256=R5Otrr3Pxn409k8Tlam8I50LM_wohJcfWX8_NlLHzV4,29461
 clarifai/runners/models/model_class.py,sha256=9JSPAr4U4K7xI0kSl-q0mHB06zknm2OR-8XIgBCto94,1611
-clarifai/runners/models/model_run_locally.py,sha256=
+clarifai/runners/models/model_run_locally.py,sha256=MGfWe0_IO6OMc8CRD8HpVDUKX7spcFVU6wXukml4OXs,20563
 clarifai/runners/models/model_runner.py,sha256=PyxwK-33hLlhkD07tTXkjWZ_iNlZHl9_8AZ2W7WfExI,6097
 clarifai/runners/models/model_servicer.py,sha256=jtQmtGeQlvQ5ttMvVw7CMnNzq-rLkTaxR2IWF9SnHwk,2808
 clarifai/runners/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-clarifai/runners/utils/const.py,sha256=
+clarifai/runners/utils/const.py,sha256=9YvnH59uh2BQWX8x0jvb8aP0Cb6YQtDiZNviuzi1CVE,845
 clarifai/runners/utils/data_handler.py,sha256=sxy9zlAgI6ETuxCQhUgEXAn2GCsaW1GxpK6GTaMne0g,6966
 clarifai/runners/utils/data_utils.py,sha256=R1iQ82TuQ9JwxCJk8yEB1Lyb0BYVhVbWJI9YDi1zGOs,318
-clarifai/runners/utils/loader.py,sha256=
+clarifai/runners/utils/loader.py,sha256=SgNHMwRmCCymFQm8aDp73NmIUHhM-N60CBlTKbPzmVc,7470
 clarifai/runners/utils/url_fetcher.py,sha256=v_8JOWmkyFAzsBulsieKX7Nfjy1Yg7wGSZeqfEvw2cg,1640
 clarifai/schema/search.py,sha256=JjTi8ammJgZZ2OGl4K6tIA4zEJ1Fr2ASZARXavI1j5c,2448
 clarifai/urls/helper.py,sha256=tjoMGGHuWX68DUB0pk4MEjrmFsClUAQj2jmVEM_Sy78,4751
 clarifai/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/utils/cli.py,sha256=CdcLsF00KdfA-BgMIbO-u88gUF9Ts1n0TDDZS-oImp8,1949
 clarifai/utils/constants.py,sha256=MG_iHnSwNEyUZOpvsrTicNwaT4CIjmlK_Ixk_qqEX8g,142
-clarifai/utils/logging.py,sha256=
+clarifai/utils/logging.py,sha256=CVy8OsLrlbg-b8qe88kb1yO_9wi9wRYfF-QkIaN9xE8,11936
 clarifai/utils/misc.py,sha256=FTmjnjkvytSVb712J2qndVDYD8XUL70LE-G-5PNL3m4,2601
 clarifai/utils/model_train.py,sha256=Mndqy5GNu7kjQHjDyNVyamL0hQFLGSHcWhOuPyOvr1w,8005
 clarifai/utils/evaluation/__init__.py,sha256=PYkurUrXrGevByj7RFb6CoU1iC7fllyQSfnnlo9WnY8,69
@@ -93,9 +93,9 @@ clarifai/workflows/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuF
 clarifai/workflows/export.py,sha256=vICRhIreqDSShxLKjHNM2JwzKsf1B4fdXB0ciMcA70k,1945
 clarifai/workflows/utils.py,sha256=nGeB_yjVgUO9kOeKTg4OBBaBz-AwXI3m-huSVj-9W18,1924
 clarifai/workflows/validate.py,sha256=yJq03MaJqi5AK3alKGJJBR89xmmjAQ31sVufJUiOqY8,2556
-clarifai-11.1.
-clarifai-11.1.
-clarifai-11.1.
-clarifai-11.1.
-clarifai-11.1.
-clarifai-11.1.
+clarifai-11.1.3.dist-info/LICENSE,sha256=mUqF_d12-qE2n41g7C5_sq-BMLOcj6CNN-jevr15YHU,555
+clarifai-11.1.3.dist-info/METADATA,sha256=5ZiW5qdV2Gh6wwxOgt_Sm_XToJRMmZgoerv-xWpYOPA,22419
+clarifai-11.1.3.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+clarifai-11.1.3.dist-info/entry_points.txt,sha256=X9FZ4Z-i_r2Ud1RpZ9sNIFYuu_-9fogzCMCRUD9hyX0,51
+clarifai-11.1.3.dist-info/top_level.txt,sha256=wUMdCQGjkxaynZ6nZ9FAnvBUCgp5RJUVFSy2j-KYo0s,9
+clarifai-11.1.3.dist-info/RECORD,,
{clarifai-11.1.1.dist-info → clarifai-11.1.3.dist-info}/LICENSE
File without changes
{clarifai-11.1.1.dist-info → clarifai-11.1.3.dist-info}/WHEEL
File without changes
{clarifai-11.1.1.dist-info → clarifai-11.1.3.dist-info}/entry_points.txt
File without changes
{clarifai-11.1.1.dist-info → clarifai-11.1.3.dist-info}/top_level.txt
File without changes