clarifai 10.9.5__py3-none-any.whl → 10.10.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clarifai/__init__.py +1 -1
- clarifai/cli/README.md +87 -0
- clarifai/cli/base.py +41 -3
- clarifai/cli/compute_cluster.py +51 -0
- clarifai/cli/deployment.py +84 -0
- clarifai/cli/model.py +1 -1
- clarifai/cli/nodepool.py +91 -0
- clarifai/client/auth/helper.py +1 -0
- clarifai/client/compute_cluster.py +5 -4
- clarifai/client/dataset.py +21 -0
- clarifai/client/deployment.py +2 -1
- clarifai/client/input.py +27 -0
- clarifai/client/nodepool.py +5 -4
- clarifai/client/user.py +6 -5
- clarifai/datasets/upload/multimodal.py +0 -2
- clarifai/runners/dockerfile_template/{Dockerfile.cpu.template → Dockerfile.template} +21 -11
- clarifai/runners/models/model_upload.py +100 -12
- clarifai/utils/cli.py +25 -0
- {clarifai-10.9.5.dist-info → clarifai-10.10.1.dist-info}/METADATA +1 -1
- {clarifai-10.9.5.dist-info → clarifai-10.10.1.dist-info}/RECORD +24 -21
- {clarifai-10.9.5.dist-info → clarifai-10.10.1.dist-info}/WHEEL +1 -1
- clarifai/runners/dockerfile_template/Dockerfile.cuda.template +0 -83
- {clarifai-10.9.5.dist-info → clarifai-10.10.1.dist-info}/LICENSE +0 -0
- {clarifai-10.9.5.dist-info → clarifai-10.10.1.dist-info}/entry_points.txt +0 -0
- {clarifai-10.9.5.dist-info → clarifai-10.10.1.dist-info}/top_level.txt +0 -0
clarifai/__init__.py
CHANGED
@@ -1 +1 @@
-__version__ = "10.9.5"
+__version__ = "10.10.1"
clarifai/cli/README.md
ADDED
@@ -0,0 +1,87 @@
+# Clarifai CLI
+
+## Overview
+
+Clarifai offers a user-friendly interface for deploying your local model into production with Clarifai, featuring:
+
+* A convenient command-line interface (CLI)
+* Easy implementation and testing in Python
+* No need for MLOps expertise
+
+## Compute Orchestration
+
+A quick example for deploying a `visual-classifier` model.
+
+### Login
+
+First, log in to the CLI using your Clarifai account details in a config file, as shown below:
+
+```bash
+$ clarifai login --config <config-filepath>
+```
+
+### Setup
+
+To prepare for the deployment step, set up a Compute Cluster with a Nodepool of the required server configuration.
+
+First, create a new Compute Cluster:
+```bash
+$ clarifai computecluster create --config <compute-cluster-config-filepath>
+```
+
+Then, create a new Nodepool in the created Compute Cluster:
+```bash
+$ clarifai nodepool create --config <nodepool-config-filepath>
+```
+
+### Deployment
+
+After setup, deploy the `visual-classifier` model using a deployment config file, as shown below:
+
+```bash
+$ clarifai deployment create --config <deployment-config-filepath>
+```
+
+### List Resources
+
+List existing Compute Clusters:
+
+```bash
+$ clarifai computecluster list
+```
+
+List existing Nodepools:
+
+```bash
+$ clarifai nodepool list --compute_cluster_id <compute-cluster-id>
+```
+
+List existing Deployments:
+
+```bash
+$ clarifai deployment list --nodepool_id <nodepool-id>
+```
+
+### Delete Resources
+
+Delete an existing Deployment:
+
+```bash
+$ clarifai deployment delete --nodepool_id <nodepool-id> --deployment_id <deployment-id>
+```
+
+Delete an existing Nodepool:
+
+```bash
+$ clarifai nodepool delete --compute_cluster_id <compute-cluster-id> --nodepool_id <nodepool-id>
+```
+
+Delete an existing Compute Cluster:
+
+```bash
+$ clarifai computecluster delete --compute_cluster_id <compute-cluster-id>
+```
+
+## Learn More
+
+* [Example Configs](https://github.com/Clarifai/examples/tree/main/ComputeOrchestration/configs)
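
The README above documents the CLI flow; the equivalent steps through the Python SDK clients touched in this release look roughly like the sketch below. It is drawn from the docstring examples later in this diff; the IDs and config file names are placeholders, and a `CLARIFAI_PAT` is assumed to be set in the environment.

```python
from clarifai.client.user import User
from clarifai.client.compute_cluster import ComputeCluster
from clarifai.client.nodepool import Nodepool

# Assumes CLARIFAI_PAT is set in the environment; IDs and paths are placeholders.
client = User(user_id="user_id")
client.create_compute_cluster(config_filepath="compute_cluster_config.yaml")

compute_cluster = ComputeCluster(compute_cluster_id="compute_cluster_id", user_id="user_id")
compute_cluster.create_nodepool(config_filepath="nodepool_config.yaml")

nodepool = Nodepool(nodepool_id="nodepool_id", user_id="user_id")
nodepool.create_deployment(config_filepath="deployment_config.yaml")
```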
clarifai/cli/base.py
CHANGED
@@ -5,7 +5,38 @@ import click
 from ..utils.cli import dump_yaml, from_yaml, load_command_modules, set_base_url


-@click.group()
+class CustomMultiGroup(click.Group):
+
+  def group(self, *args, **kwargs):
+    """Behaves the same as `click.Group.group()` except if passed
+    a list of names, all after the first will be aliases for the first.
+    """
+
+    def decorator(f):
+      aliased_group = []
+      if isinstance(args[0], list):
+        # we have a list so create group aliases
+        _args = [args[0][0]] + list(args[1:])
+        for alias in args[0][1:]:
+          grp = super(CustomMultiGroup, self).group(alias, *args[1:], **kwargs)(f)
+          grp.short_help = "Alias for '{}'".format(_args[0])
+          aliased_group.append(grp)
+      else:
+        _args = args
+
+      # create the main group
+      grp = super(CustomMultiGroup, self).group(*_args, **kwargs)(f)
+
+      # for all of the aliased groups, share the main group commands
+      for aliased in aliased_group:
+        aliased.commands = grp.commands
+
+      return grp
+
+    return decorator
+
+
+@click.group(cls=CustomMultiGroup)
 @click.pass_context
 def cli(ctx):
   """Clarifai CLI"""
@@ -49,9 +80,16 @@ def login(ctx, config, env, user_id):
   if user_id:
     ctx.obj['user_id'] = user_id
     os.environ["CLARIFAI_USER_ID"] = ctx.obj['user_id']
-  elif 'user_id' in ctx.obj
-    ctx.obj['user_id'] = ctx.obj.get('user_id',
+  elif 'user_id' in ctx.obj:
+    ctx.obj['user_id'] = ctx.obj.get('user_id', "")
     os.environ["CLARIFAI_USER_ID"] = ctx.obj['user_id']
+  elif 'CLARIFAI_USER_ID' in os.environ:
+    ctx.obj['user_id'] = os.environ["CLARIFAI_USER_ID"]
+  else:
+    user_id = click.prompt("Pass the User ID here", type=str)
+    os.environ["CLARIFAI_USER_ID"] = user_id
+    ctx.obj['user_id'] = user_id
+    click.echo("User ID saved successfully.")

   if env:
     ctx.obj['env'] = env
clarifai/cli/compute_cluster.py
ADDED
@@ -0,0 +1,51 @@
+import click
+from clarifai.cli.base import cli
+from clarifai.client.user import User
+from clarifai.utils.cli import display_co_resources
+
+
+@cli.group(['computecluster', 'cc'])
+def computecluster():
+  """Manage Compute Clusters: create, delete, list"""
+  pass
+
+
+@computecluster.command()
+@click.option(
+    '--config',
+    type=click.Path(exists=True),
+    required=True,
+    help='Path to the compute cluster config file.')
+@click.option(
+    '-cc_id',
+    '--compute_cluster_id',
+    required=False,
+    help='New Compute Cluster ID for the compute cluster to create.')
+@click.pass_context
+def create(ctx, config, compute_cluster_id):
+  """Create a new Compute Cluster with the given config file."""
+  user = User(user_id=ctx.obj['user_id'], pat=ctx.obj['pat'], base_url=ctx.obj['base_url'])
+  if compute_cluster_id:
+    user.create_compute_cluster(config, compute_cluster_id=compute_cluster_id)
+  else:
+    user.create_compute_cluster(config)
+
+
+@computecluster.command()
+@click.option('--page_no', required=False, help='Page number to list.', default=1)
+@click.option('--per_page', required=False, help='Number of items per page.', default=16)
+@click.pass_context
+def list(ctx, page_no, per_page):
+  """List all compute clusters for the user."""
+  user = User(user_id=ctx.obj['user_id'], pat=ctx.obj['pat'], base_url=ctx.obj['base_url'])
+  response = user.list_compute_clusters(page_no, per_page)
+  display_co_resources(response, "Compute Cluster")
+
+
+@computecluster.command()
+@click.option('-cc_id', '--compute_cluster_id', help='Compute Cluster ID of the user to delete.')
+@click.pass_context
+def delete(ctx, compute_cluster_id):
+  """Deletes a compute cluster for the user."""
+  user = User(user_id=ctx.obj['user_id'], pat=ctx.obj['pat'], base_url=ctx.obj['base_url'])
+  user.delete_compute_clusters([compute_cluster_id])
clarifai/cli/deployment.py
ADDED
@@ -0,0 +1,84 @@
+import click
+from clarifai.cli.base import cli
+from clarifai.client.nodepool import Nodepool
+from clarifai.utils.cli import display_co_resources, from_yaml
+
+
+@cli.group(['deployment', 'dpl'])
+def deployment():
+  """Manage Deployments: create, delete, list"""
+  pass
+
+
+@deployment.command()
+@click.option(
+    '-np_id',
+    '--nodepool_id',
+    required=False,
+    help='Nodepool ID for the Nodepool to interact with.')
+@click.option(
+    '--config',
+    type=click.Path(exists=True),
+    required=True,
+    help='Path to the deployment config file.')
+@click.option(
+    '-dpl_id',
+    '--deployment_id',
+    required=False,
+    help='New deployment ID for the deployment to create.')
+@click.pass_context
+def create(ctx, nodepool_id, config, deployment_id):
+  """Create a new Deployment with the given config file."""
+  if not nodepool_id:
+    deployment_config = from_yaml(config)
+    nodepool_id = deployment_config['deployment']['nodepools'][0]['id']
+
+  nodepool = Nodepool(
+      nodepool_id=nodepool_id,
+      user_id=ctx.obj['user_id'],
+      pat=ctx.obj['pat'],
+      base_url=ctx.obj['base_url'])
+  if deployment_id:
+    nodepool.create_deployment(config, deployment_id=deployment_id)
+  else:
+    nodepool.create_deployment(config)
+
+
+@deployment.command()
+@click.option(
+    '-np_id',
+    '--nodepool_id',
+    required=True,
+    help='Nodepool ID for the Nodepool to interact with.')
+@click.option('--page_no', required=False, help='Page number to list.', default=1)
+@click.option('--per_page', required=False, help='Number of items per page.', default=16)
+@click.pass_context
+def list(ctx, nodepool_id, page_no, per_page):
+  """List all deployments for the nodepool."""
+
+  nodepool = Nodepool(
+      nodepool_id=nodepool_id,
+      user_id=ctx.obj['user_id'],
+      pat=ctx.obj['pat'],
+      base_url=ctx.obj['base_url'])
+  response = nodepool.list_deployments(page_no=page_no, per_page=per_page)
+  display_co_resources(response, "Deployment")
+
+
+@deployment.command()
+@click.option(
+    '-np_id',
+    '--nodepool_id',
+    required=True,
+    help='Nodepool ID for the Nodepool to interact with.')
+@click.option('-dpl_id', '--deployment_id', help='Deployment ID of the nodepool to delete.')
+@click.pass_context
+def delete(ctx, nodepool_id, deployment_id):
+  """Deletes a deployment for the nodepool."""
+
+  nodepool = Nodepool(
+      nodepool_id=nodepool_id,
+      user_id=ctx.obj['user_id'],
+      pat=ctx.obj['pat'],
+      base_url=ctx.obj['base_url'])
+  nodepool.delete_deployments([deployment_id])
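
When `--nodepool_id` is omitted, `create` above falls back to `deployment_config['deployment']['nodepools'][0]['id']`, and the client later reads an `id` from the processed config when `--deployment_id` is not given. A hedged sketch of the minimal config shape those lookups imply; the real schema (see the Example Configs repo linked in the README) carries more fields:

```python
import yaml

# Minimal shape implied by the lookups above; real deployment configs have more fields.
deployment_config = {
    "deployment": {
        "id": "my-deployment",  # picked up when -dpl_id/--deployment_id is not passed
        "nodepools": [{"id": "my-nodepool"}],  # first entry used when -np_id is not passed
    }
}

with open("deployment_config.yaml", "w") as f:
  yaml.safe_dump(deployment_config, f)
```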
clarifai/cli/model.py
CHANGED
clarifai/cli/nodepool.py
ADDED
@@ -0,0 +1,91 @@
+import click
+from clarifai.cli.base import cli
+from clarifai.client.compute_cluster import ComputeCluster
+from clarifai.utils.cli import display_co_resources, dump_yaml, from_yaml
+
+
+@cli.group(['nodepool', 'np'])
+def nodepool():
+  """Manage Nodepools: create, delete, list"""
+  pass
+
+
+@nodepool.command()
+@click.option(
+    '-cc_id',
+    '--compute_cluster_id',
+    required=False,
+    help='Compute Cluster ID for the compute cluster to interact with.')
+@click.option(
+    '--config',
+    type=click.Path(exists=True),
+    required=True,
+    help='Path to the nodepool config file.')
+@click.option(
+    '-np_id', '--nodepool_id', required=False, help='New Nodepool ID for the nodepool to create.')
+@click.pass_context
+def create(ctx, compute_cluster_id, config, nodepool_id):
+  """Create a new Nodepool with the given config file."""
+
+  nodepool_config = from_yaml(config)
+  if not compute_cluster_id:
+    if 'compute_cluster' not in nodepool_config['nodepool']:
+      click.echo(
+          "Please provide a compute cluster ID either in the config file or using --compute_cluster_id flag",
+          err=True)
+      return
+    compute_cluster_id = nodepool_config['nodepool']['compute_cluster']['id']
+  else:
+    if 'compute_cluster' not in nodepool_config['nodepool']:
+      nodepool_config['nodepool']['compute_cluster']['id'] = compute_cluster_id
+      dump_yaml(config, nodepool_config)
+
+  compute_cluster = ComputeCluster(
+      compute_cluster_id=compute_cluster_id,
+      user_id=ctx.obj['user_id'],
+      pat=ctx.obj['pat'],
+      base_url=ctx.obj['base_url'])
+  if nodepool_id:
+    compute_cluster.create_nodepool(config, nodepool_id=nodepool_id)
+  else:
+    compute_cluster.create_nodepool(config)
+
+
+@nodepool.command()
+@click.option(
+    '-cc_id',
+    '--compute_cluster_id',
+    required=True,
+    help='Compute Cluster ID for the compute cluster to interact with.')
+@click.option('--page_no', required=False, help='Page number to list.', default=1)
+@click.option('--per_page', required=False, help='Number of items per page.', default=16)
+@click.pass_context
+def list(ctx, compute_cluster_id, page_no, per_page):
+  """List all nodepools for the user."""
+
+  compute_cluster = ComputeCluster(
+      compute_cluster_id=compute_cluster_id,
+      user_id=ctx.obj['user_id'],
+      pat=ctx.obj['pat'],
+      base_url=ctx.obj['base_url'])
+  response = compute_cluster.list_nodepools(page_no, per_page)
+  display_co_resources(response, "Nodepool")
+
+
+@nodepool.command()
+@click.option(
+    '-cc_id',
+    '--compute_cluster_id',
+    required=True,
+    help='Compute Cluster ID for the compute cluster to interact with.')
+@click.option('-np_id', '--nodepool_id', help='Nodepool ID of the user to delete.')
+@click.pass_context
+def delete(ctx, compute_cluster_id, nodepool_id):
+  """Deletes a nodepool for the user."""
+
+  compute_cluster = ComputeCluster(
+      compute_cluster_id=compute_cluster_id,
+      user_id=ctx.obj['user_id'],
+      pat=ctx.obj['pat'],
+      base_url=ctx.obj['base_url'])
+  compute_cluster.delete_nodepools([nodepool_id])
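
As with deployments, the nodepool `create` command above reads `nodepool_config['nodepool']['compute_cluster']['id']` when `--compute_cluster_id` is not given, and the client falls back to the config's own `id` when `--nodepool_id` is omitted. A hedged sketch of the minimal shape those lookups imply (the real schema has more fields; see the Example Configs repo):

```python
import yaml

# Minimal shape implied by the lookups above; real nodepool configs have more fields.
nodepool_config = {
    "nodepool": {
        "id": "my-nodepool",  # picked up when -np_id/--nodepool_id is not passed
        "compute_cluster": {"id": "my-compute-cluster"},  # used when -cc_id is not passed
    }
}

with open("nodepool_config.yaml", "w") as f:
  yaml.safe_dump(nodepool_config, f)
```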
clarifai/client/auth/helper.py
CHANGED
@@ -321,6 +321,7 @@ Additionally, these optional params are supported:

   def set_base(self, base: str):
     """ Set the base domain for the API. """
+    base = DEFAULT_BASE if base is None else base
     self._base = https_cache(base_https_cache, base)

   def set_ui(self, ui: str):
clarifai/client/compute_cluster.py
CHANGED
@@ -102,12 +102,12 @@ class ComputeCluster(Lister, BaseClient):
       nodepool["visibility"] = resources_pb2.Visibility(**nodepool["visibility"])
     return nodepool

-  def create_nodepool(self,
+  def create_nodepool(self, config_filepath: str, nodepool_id: str = None) -> Nodepool:
     """Creates a nodepool for the compute cluster.

     Args:
-        nodepool_id (str): The nodepool ID for the nodepool to create.
         config_filepath (str): The path to the nodepool config file.
+        nodepool_id (str): New nodepool ID for the nodepool to create.

     Returns:
         Nodepool: A Nodepool object for the specified nodepool ID.
@@ -115,7 +115,7 @@ class ComputeCluster(Lister, BaseClient):
     Example:
         >>> from clarifai.client.compute_cluster import ComputeCluster
         >>> compute_cluster = ComputeCluster(compute_cluster_id="compute_cluster_id", user_id="user_id")
-        >>> nodepool = compute_cluster.create_nodepool(
+        >>> nodepool = compute_cluster.create_nodepool(config_filepath="config.yml")
     """
     if not os.path.exists(config_filepath):
       raise UserError(f"Nodepool config file not found at {config_filepath}")
@@ -123,7 +123,8 @@ class ComputeCluster(Lister, BaseClient):
     nodepool_config = self._process_nodepool_config(config_filepath)

     if 'id' in nodepool_config:
-      nodepool_id
+      if nodepool_id is None:
+        nodepool_id = nodepool_config['id']
       nodepool_config.pop('id')

     request = service_pb2.PostNodepoolsRequest(
clarifai/client/dataset.py
CHANGED
@@ -9,6 +9,7 @@ from typing import Dict, Generator, List, Optional, Tuple, Type, TypeVar, Union

 import requests
 from clarifai_grpc.grpc.api import resources_pb2, service_pb2
+from clarifai_grpc.grpc.api.resources_pb2 import Input
 from clarifai_grpc.grpc.api.service_pb2 import MultiInputResponse
 from clarifai_grpc.grpc.api.status import status_code_pb2, status_pb2
 from google.protobuf.json_format import MessageToDict
@@ -190,6 +191,26 @@ class Dataset(Lister, BaseClient):
       }
       yield Dataset.from_auth_helper(self.auth_helper, **kwargs)

+  def list_inputs(self, page_no: int = None, per_page: int = None,
+                  input_type: str = None) -> Generator[Input, None, None]:
+    """Lists all the inputs for the dataset.
+
+    Args:
+        page_no (int): The page number to list.
+        per_page (int): The number of items per page.
+        input_type (str): The type of input to list. Options: 'image', 'video', 'audio', 'text'.
+
+    Yields:
+        Input: Input objects in the dataset.
+
+    Example:
+        >>> from clarifai.client.dataset import Dataset
+        >>> dataset = Dataset(dataset_id='dataset_id', user_id='user_id', app_id='app_id')
+        >>> all_dataset_inputs = list(dataset.list_inputs())
+    """
+    return self.input_object.list_inputs(
+        dataset_id=self.id, page_no=page_no, per_page=per_page, input_type=input_type)
+
   def __iter__(self):
     return iter(DatasetExportReader(archive_url=self.archive_zip()))

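
A short usage sketch for the new `Dataset.list_inputs` generator; IDs are placeholders, and the pagination and `input_type` values follow the docstring above:

```python
from clarifai.client.dataset import Dataset

dataset = Dataset(dataset_id="dataset_id", user_id="user_id", app_id="app_id")

# Stream only the image inputs, 16 per page.
for inp in dataset.list_inputs(page_no=1, per_page=16, input_type="image"):
  print(inp.id)
```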
clarifai/client/deployment.py
CHANGED
@@ -49,7 +49,8 @@ class Deployment(Lister, BaseClient):
     Returns:
         resources_pb2.RunnerSelector: A RunnerSelector object for the given deployment_id.
     """
-    return resources_pb2.RunnerSelector(
+    return resources_pb2.RunnerSelector(
+        deployment=resources_pb2.Deployment(id=deployment_id, user_id=user_id))

   def __getattr__(self, name):
     return getattr(self.deployment_info, name)
clarifai/client/input.py
CHANGED
@@ -867,6 +867,33 @@ class Inputs(Lister, BaseClient):
       raise Exception(response.status)
     self.logger.info("\nInputs Deleted\n%s", response.status)

+  def delete_annotations(self, input_ids: List[str], annotation_ids: List[str] = []) -> None:
+    """Delete list of annotations of input objects from the app.
+
+    Args:
+        input_ids (Input): List of input objects for which annotations to delete.
+        annotation_ids (List[str]): List of annotation ids to delete.
+
+    Example:
+        >>> from clarifai.client.user import User
+        >>> input_obj = User(user_id="user_id").app(app_id="app_id").inputs()
+        >>> input_obj.delete_annotations(input_ids=['input_id_1', 'input_id_2'])
+
+    Note:
+        'annotation_ids' are optional, but if they are provided, the number and order in
+        'annotation_ids' and 'input_ids' should match
+    """
+    if not isinstance(input_ids, list):
+      raise UserError("input_ids must be a list of input ids")
+    if annotation_ids and len(input_ids) != len(annotation_ids):
+      raise UserError("Number of provided annotation_ids and input_ids should match.")
+    request = service_pb2.DeleteAnnotationsRequest(
+        user_app_id=self.user_app_id, ids=annotation_ids, input_ids=input_ids)
+    response = self._grpc_request(self.STUB.DeleteAnnotations, request)
+    if response.status.code != status_code_pb2.SUCCESS:
+      raise Exception(response.status)
+    self.logger.info("\nAnnotations Deleted\n%s", response.status)
+
   def download_inputs(self, inputs: List[Input]) -> List[bytes]:
     """Download list of input objects from the app.

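
A small sketch of the new `delete_annotations` call, illustrating the matching rule spelled out in the Note above; all IDs are placeholders:

```python
from clarifai.client.user import User

input_obj = User(user_id="user_id").app(app_id="app_id").inputs()

# Positions must line up: annotation_id_1 belongs to input_id_1, and so on.
input_obj.delete_annotations(
    input_ids=["input_id_1", "input_id_2"],
    annotation_ids=["annotation_id_1", "annotation_id_2"])

# Per the docstring example, annotation_ids can also be omitted entirely.
input_obj.delete_annotations(input_ids=["input_id_3"])
```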
clarifai/client/nodepool.py
CHANGED
@@ -136,12 +136,12 @@ class Nodepool(Lister, BaseClient):
     nodepool = resources_pb2.Nodepool(id=nodepool_id, compute_cluster=compute_cluster)
     return resources_pb2.RunnerSelector(nodepool=nodepool)

-  def create_deployment(self,
+  def create_deployment(self, config_filepath: str, deployment_id: str = None) -> Deployment:
     """Creates a deployment for the nodepool.

     Args:
-        deployment_id (str): The deployment ID for the deployment to create.
         config_filepath (str): The path to the deployment config file.
+        deployment_id (str): New deployment ID for the deployment to create.

     Returns:
         Deployment: A Deployment object for the specified deployment ID.
@@ -149,7 +149,7 @@
     Example:
         >>> from clarifai.client.nodepool import Nodepool
         >>> nodepool = Nodepool(nodepool_id="nodepool_id", user_id="user_id")
-        >>> deployment = nodepool.create_deployment(
+        >>> deployment = nodepool.create_deployment(config_filepath="config.yml")
     """
     if not os.path.exists(config_filepath):
       raise UserError(f"Deployment config file not found at {config_filepath}")
@@ -157,7 +157,8 @@
     deployment_config = self._process_deployment_config(config_filepath)

     if 'id' in deployment_config:
-      deployment_id
+      if deployment_id is None:
+        deployment_id = deployment_config['id']
       deployment_config.pop('id')

     request = service_pb2.PostDeploymentsRequest(
clarifai/client/user.py
CHANGED
@@ -222,13 +222,13 @@ class User(Lister, BaseClient):
       compute_cluster["visibility"] = resources_pb2.Visibility(**compute_cluster["visibility"])
     return compute_cluster

-  def create_compute_cluster(self,
-
+  def create_compute_cluster(self, config_filepath: str,
+                             compute_cluster_id: str = None) -> ComputeCluster:
     """Creates a compute cluster for the user.

     Args:
-        compute_cluster_id (str): The compute cluster ID for the compute cluster to create.
         config_filepath (str): The path to the compute cluster config file.
+        compute_cluster_id (str): New compute cluster ID for the compute cluster to create.

     Returns:
         ComputeCluster: A Compute Cluster object for the specified compute cluster ID.
@@ -236,7 +236,7 @@
     Example:
         >>> from clarifai.client.user import User
         >>> client = User(user_id="user_id")
-        >>> compute_cluster = client.create_compute_cluster(
+        >>> compute_cluster = client.create_compute_cluster(config_filepath="config.yml")
     """
     if not os.path.exists(config_filepath):
       raise UserError(f"Compute Cluster config file not found at {config_filepath}")
@@ -244,7 +244,8 @@
     compute_cluster_config = self._process_compute_cluster_config(config_filepath)

     if 'id' in compute_cluster_config:
-      compute_cluster_id
+      if compute_cluster_id is None:
+        compute_cluster_id = compute_cluster_config['id']
       compute_cluster_config.pop('id')

     request = service_pb2.PostComputeClustersRequest(
clarifai/datasets/upload/multimodal.py
CHANGED
@@ -6,7 +6,6 @@ from google.protobuf.struct_pb2 import Struct

 from clarifai.client.input import Inputs
 from clarifai.datasets.upload.base import ClarifaiDataLoader, ClarifaiDataset
-from clarifai.utils.misc import get_uuid


 class MultiModalDataset(ClarifaiDataset):
@@ -36,7 +35,6 @@ class MultiModalDataset(ClarifaiDataset):
       image_bytes = data_item.image_bytes
       text = data_item.text
       labels = data_item.labels if isinstance(data_item.labels, list) else [data_item.labels]
-      id = get_uuid(8)
       input_id = f"{self.dataset_id}-{id}" if data_item.id is None else f"{self.dataset_id}-{str(data_item.id)}"
       if data_item.metadata is not None:
         metadata.update(data_item.metadata)
clarifai/runners/dockerfile_template/{Dockerfile.cpu.template → Dockerfile.template}
RENAMED
@@ -1,16 +1,16 @@
-
-FROM public.ecr.aws/docker/library/python:${PYTHON_VERSION}-slim-bookworm as build
+FROM --platform=$TARGETPLATFORM ${BASE_IMAGE} as build

-
-WORKDIR /app
+ENV DEBIAN_FRONTEND=noninteractive

+#############################
+# User specific requirements
+#############################
 COPY requirements.txt .
-# Install requirements and cleanup before leaving this line.
-# Note(zeiler): this could be in a future template as {{model_python_deps}}
-RUN python -m pip install -r requirements.txt && rm -rf /root/.cache

-# Install
-
+# Install requirements and clarifai package and cleanup before leaving this line.
+# Note(zeiler): this could be in a future template as {{model_python_deps}}
+RUN pip install --no-cache-dir -r requirements.txt && \
+    pip install --no-cache-dir clarifai

 # These will be set by the templaing system.
 ENV CLARIFAI_PAT=${CLARIFAI_PAT}
@@ -20,12 +20,22 @@ ENV CLARIFAI_NODEPOOL_ID=${CLARIFAI_NODEPOOL_ID}
 ENV CLARIFAI_COMPUTE_CLUSTER_ID=${CLARIFAI_COMPUTE_CLUSTER_ID}
 ENV CLARIFAI_API_BASE=${CLARIFAI_API_BASE}

+# Set the NUMBA cache dir to /tmp
+ENV NUMBA_CACHE_DIR=/tmp/numba_cache
+ENV HOME=/tmp
+
+# Set the working directory to /app
+WORKDIR /app
+
 # Copy the current folder into /app/model_dir that the SDK will expect.
+# Note(zeiler): would be nice to exclude checkpoints in case they were pre-downloaded.
 COPY . /app/model_dir/${name}

 # Add the model directory to the python path.
-ENV PYTHONPATH
+ENV PYTHONPATH=${PYTHONPATH}:/app/model_dir/${name}
+
+ENTRYPOINT ["python", "-m", "clarifai.runners.server"]

 # Finally run the clarifai entrypoint to start the runner loop and local dev server.
 # Note(zeiler): we may want to make this a clarifai CLI call.
-CMD ["
+CMD ["--model_path", "/app/model_dir/main"]
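
The `${BASE_IMAGE}` and `${name}` placeholders above are filled in by `ModelUploader.create_dockerfile` (next section) via `string.Template.safe_substitute`. A small illustration of that mechanism; the base-image tag shown is just the uploader's `PYTHON_BASE_IMAGE` pattern filled with Python 3.11:

```python
from string import Template

template = Template("FROM --platform=$TARGETPLATFORM ${BASE_IMAGE} as build\n"
                    "COPY . /app/model_dir/${name}\n")

# safe_substitute fills known placeholders and leaves unknown ones such as
# $TARGETPLATFORM untouched, so Docker can still resolve them at build time.
print(template.safe_substitute(
    name="main",
    BASE_IMAGE="public.ecr.aws/clarifai-models/python-base:3.11"))
```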
clarifai/runners/models/model_upload.py
CHANGED
@@ -1,4 +1,5 @@
 import os
+import re
 import time
 from string import Template

@@ -23,6 +24,44 @@ def _clear_line(n: int = 1) -> None:

 class ModelUploader:
   DEFAULT_PYTHON_VERSION = 3.11
+  DEFAULT_TORCH_VERSION = '2.4.0'
+  DEFAULT_CUDA_VERSION = '124'
+  # List of available torch images for matrix
+  '''
+  python_version: ['3.8', '3.9', '3.10', '3.11']
+  torch_version: ['2.0.0', '2.1.0', '2.2.0', '2.3.0', '2.4.0', '2.4.1', '2.5.0']
+  cuda_version: ['124']
+  '''
+  AVAILABLE_TORCH_IMAGES = [
+      '2.0.0-py3.8-cuda124',
+      '2.0.0-py3.9-cuda124',
+      '2.0.0-py3.10-cuda124',
+      '2.0.0-py3.11-cuda124',
+      '2.1.0-py3.8-cuda124',
+      '2.1.0-py3.9-cuda124',
+      '2.1.0-py3.10-cuda124',
+      '2.1.0-py3.11-cuda124',
+      '2.2.0-py3.8-cuda124',
+      '2.2.0-py3.9-cuda124',
+      '2.2.0-py3.10-cuda124',
+      '2.2.0-py3.11-cuda124',
+      '2.3.0-py3.8-cuda124',
+      '2.3.0-py3.9-cuda124',
+      '2.3.0-py3.10-cuda124',
+      '2.3.0-py3.11-cuda124',
+      '2.4.0-py3.8-cuda124',
+      '2.4.0-py3.9-cuda124',
+      '2.4.0-py3.10-cuda124',
+      '2.4.0-py3.11-cuda124',
+      '2.4.1-py3.8-cuda124',
+      '2.4.1-py3.9-cuda124',
+      '2.4.1-py3.10-cuda124',
+      '2.4.1-py3.11-cuda124',
+  ]
+  AVAILABLE_PYTHON_IMAGES = ['3.8', '3.9', '3.10', '3.11', '3.12', '3.13']
+  PYTHON_BASE_IMAGE = 'public.ecr.aws/clarifai-models/python-base:{python_version}'
+  TORCH_BASE_IMAGE = 'public.ecr.aws/clarifai-models/torch:{torch_version}-py{python_version}-cuda{cuda_version}'
+
   CONCEPTS_REQUIRED_MODEL_TYPE = [
       'visual-classifier', 'visual-detector', 'visual-segmenter', 'text-classifier'
   ]
@@ -144,18 +183,46 @@ class ModelUploader:
     )
     return self.client.STUB.PostModels(request)

+  def _parse_requirements(self):
+    # parse the user's requirements.txt to determine the proper base image to build on top of, based on the torch and other large dependencies and it's versions
+    # List of dependencies to look for
+    dependencies = [
+        'torch',
+    ]
+    # Escape dependency names for regex
+    dep_pattern = '|'.join(map(re.escape, dependencies))
+    # All possible version specifiers
+    version_specifiers = '==|>=|<=|!=|~=|>|<'
+    # Compile a regex pattern with verbose mode for readability
+    pattern = re.compile(r"""
+        ^\s*                                   # Start of line, optional whitespace
+        (?P<dependency>""" + dep_pattern + r""")  # Dependency name
+        \s*                                    # Optional whitespace
+        (?P<specifier>""" + version_specifiers + r""")?  # Optional version specifier
+        \s*                                    # Optional whitespace
+        (?P<version>[^\s;]+)?                  # Optional version (up to space or semicolon)
+        """, re.VERBOSE)
+
+    deendencies_version = {}
+    with open(os.path.join(self.folder, 'requirements.txt'), 'r') as file:
+      for line in file:
+        # Skip empty lines and comments
+        line = line.strip()
+        if not line or line.startswith('#'):
+          continue
+        match = pattern.match(line)
+        if match:
+          dependency = match.group('dependency')
+          version = match.group('version')
+          deendencies_version[dependency] = version if version else None
+    return deendencies_version
+
   def create_dockerfile(self):
-
-
-
-
-
-          'Dockerfile.cuda.template',
-      )
-    else:
-      dockerfile_template = os.path.join(
-          os.path.dirname(os.path.dirname(__file__)), 'dockerfile_template',
-          'Dockerfile.cpu.template')
+    dockerfile_template = os.path.join(
+        os.path.dirname(os.path.dirname(__file__)),
+        'dockerfile_template',
+        'Dockerfile.template',
+    )

     with open(dockerfile_template, 'r') as template_file:
       dockerfile_template = template_file.read()
@@ -166,6 +233,11 @@ class ModelUploader:
     build_info = self.config.get('build_info', {})
     if 'python_version' in build_info:
       python_version = build_info['python_version']
+      if python_version not in self.AVAILABLE_PYTHON_IMAGES:
+        logger.error(
+            f"Python version {python_version} not supported, please use one of the following versions: {self.AVAILABLE_PYTHON_IMAGES}"
+        )
+        return
       logger.info(
           f"Using Python version {python_version} from the config file to build the Dockerfile")
     else:
@@ -174,10 +246,26 @@
       )
       python_version = self.DEFAULT_PYTHON_VERSION

+    base_image = self.PYTHON_BASE_IMAGE.format(python_version=python_version)
+
+    # Parse the requirements.txt file to determine the base image
+    dependencies = self._parse_requirements()
+    if 'torch' in dependencies and dependencies['torch']:
+      torch_version = dependencies['torch']
+
+      for image in self.AVAILABLE_TORCH_IMAGES:
+        if torch_version in image and f'py{python_version}' in image:
+          base_image = self.TORCH_BASE_IMAGE.format(
+              torch_version=torch_version,
+              python_version=python_version,
+              cuda_version=self.DEFAULT_CUDA_VERSION)
+          logger.info(f"Using Torch version {torch_version} base image to build the Docker image")
+          break
+
     # Replace placeholders with actual values
     dockerfile_content = dockerfile_template.safe_substitute(
-        PYTHON_VERSION=python_version,
         name='main',
+        BASE_IMAGE=base_image,
     )

     # Write Dockerfile
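
To see what `_parse_requirements` extracts, the pattern above can be exercised on a few sample requirement lines; only `torch` is in the dependency list, so other packages are ignored:

```python
import re

dependencies = ['torch']
dep_pattern = '|'.join(map(re.escape, dependencies))
version_specifiers = '==|>=|<=|!=|~=|>|<'
pattern = re.compile(r"""
    ^\s*                                   # Start of line, optional whitespace
    (?P<dependency>""" + dep_pattern + r""")  # Dependency name
    \s*                                    # Optional whitespace
    (?P<specifier>""" + version_specifiers + r""")?  # Optional version specifier
    \s*                                    # Optional whitespace
    (?P<version>[^\s;]+)?                  # Optional version (up to space or semicolon)
    """, re.VERBOSE)

for line in ["torch==2.4.0", "torch", "numpy>=1.26"]:
  m = pattern.match(line)
  print(line, "->", None if m is None else (m.group('dependency'), m.group('version')))
# torch==2.4.0 -> ('torch', '2.4.0')
# torch        -> ('torch', None)
# numpy>=1.26  -> None
```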
clarifai/utils/cli.py
CHANGED
@@ -5,6 +5,11 @@ import pkgutil
 import click
 import yaml

+from rich.console import Console
+from rich.panel import Panel
+from rich.style import Style
+from rich.text import Text
+

 def from_yaml(filename: str):
   try:
@@ -43,3 +48,23 @@ def load_command_modules():
   for _, module_name, _ in pkgutil.iter_modules([package_dir]):
     if module_name != 'base':  # Skip the base.py file itself
       importlib.import_module(f'clarifai.cli.{module_name}')
+
+
+def display_co_resources(response, resource_type):
+  """Display compute orchestration resources listing results using rich."""
+
+  console = Console()
+  panel = Panel(
+      Text(f"List of {resource_type}s", justify="center"),
+      title="",
+      style=Style(color="blue", bold=True),
+      border_style="green",
+      width=60)
+  console.print(panel)
+  for indx, item in enumerate(list(response)):
+    panel = Panel(
+        "\n".join([f"{'ID'}: {item.id}", f"{'Description'}: {item.description}"]),
+        title=f"{resource_type} {(indx + 1)}",
+        border_style="green",
+        width=60)
+    console.print(panel)
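
`display_co_resources` only needs an iterable of objects exposing `.id` and `.description`, so it can be tried with stand-in objects; in real use the iterable comes from the `list_*` client calls elsewhere in this diff:

```python
from types import SimpleNamespace

from clarifai.utils.cli import display_co_resources

# Stand-in objects with the two attributes the helper reads.
fake_response = [
    SimpleNamespace(id="cc-1", description="Dev cluster"),
    SimpleNamespace(id="cc-2", description="Prod cluster"),
]
display_co_resources(fake_response, "Compute Cluster")
```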
{clarifai-10.9.5.dist-info → clarifai-10.10.1.dist-info}/RECORD
CHANGED
@@ -1,26 +1,30 @@
-clarifai/__init__.py,sha256=
+clarifai/__init__.py,sha256=9zsqePD7LCjMaH7l_9eXWc_FNCxosvR1TP75yQryev4,24
 clarifai/cli.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/errors.py,sha256=RwzTajwds51wLD0MVlMC5kcpBnzRpreDLlazPSBZxrg,2605
 clarifai/versions.py,sha256=jctnczzfGk_S3EnVqb2FjRKfSREkNmvNEwAAa_VoKiQ,222
+clarifai/cli/README.md,sha256=YGApHfeUyu5P0Pdth-mqQCQftWHDxz6bugDlvDXDhOE,1942
 clarifai/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-clarifai/cli/base.py,sha256=
-clarifai/cli/
+clarifai/cli/base.py,sha256=okuBNlMmLEQw9-0f4yzemCtneNNRTVXUugCwD58-ZtQ,3417
+clarifai/cli/compute_cluster.py,sha256=N2dNQNJEPg9nxsb8x2igEzYuGRzjn7l4kNttjFIxmhI,1827
+clarifai/cli/deployment.py,sha256=sUEuz5-rtozMx8deVcJXLi6lHsP2jc8x3y2MpUAVfqY,2506
+clarifai/cli/model.py,sha256=lp_5FGO-lWb07c7nNHEsJ6NA0mxVyvL3wqU5N-_sbEU,1934
+clarifai/cli/nodepool.py,sha256=yihxS_rIFoBBKzRlqBX8Ab42iPpBMJrJFsk8saph6ms,3049
 clarifai/client/__init__.py,sha256=xI1U0l5AZdRThvQAXCLsd9axxyFzXXJ22m8LHqVjQRU,662
 clarifai/client/app.py,sha256=6pckYme1urV2YJjLIYfeZ-vH0Z5YSQa51jzIMcEfwug,38342
 clarifai/client/base.py,sha256=hSHOqkXbSKyaRDeylMMnkhUHCAHhEqno4KI0CXGziBA,7536
-clarifai/client/compute_cluster.py,sha256=
-clarifai/client/dataset.py,sha256=
-clarifai/client/deployment.py,sha256=
-clarifai/client/input.py,sha256=
+clarifai/client/compute_cluster.py,sha256=EvW9TJjPvInUlggfg1A98sxoWH8_PY5rCVXZhsj6ac0,8705
+clarifai/client/dataset.py,sha256=AIzwbYs-ExkmUqW9nuEJgpW8-D7rjA1PtopU5Iu6YZE,32018
+clarifai/client/deployment.py,sha256=w7Y6pA1rYG4KRK1SwusRZc2sQRXlG8wezuVdzSWpCo0,2586
+clarifai/client/input.py,sha256=GvrPV2chThNjimekBIleuIr6AD10_wrfc-1Hm5C4NQ8,45648
 clarifai/client/lister.py,sha256=03KGMvs5RVyYqxLsSrWhNc34I8kiF1Ph0NeyEwu7nMU,2082
 clarifai/client/model.py,sha256=WmLBPm_rDzbPR_Cxo8gnntBnPiWFt3gYKiiKuJ9lH04,84652
 clarifai/client/module.py,sha256=FTkm8s9m-EaTKN7g9MnLhGJ9eETUfKG7aWZ3o1RshYs,4204
-clarifai/client/nodepool.py,sha256=
+clarifai/client/nodepool.py,sha256=la3vTFrO4LX8zm2eQ5jqf2L0-kQ63Dano8FibadoZbk,10152
 clarifai/client/search.py,sha256=GaPWN6JmTQGZaCHr6U1yv0zqR6wKFl7i9IVLg2ul1CI,14254
-clarifai/client/user.py,sha256=
+clarifai/client/user.py,sha256=WOv66ww2wx9isIge3V-xTHCkqC6akl2jEGAxzT_Ugfs,17642
 clarifai/client/workflow.py,sha256=BOmA1ilHxsw-yl_ZE1NOEshzV7owavnXTIG2UOD6PuA,12163
 clarifai/client/auth/__init__.py,sha256=7EwR0NrozkAUwpUnCsqXvE_p0wqx_SelXlSpKShKJK0,136
-clarifai/client/auth/helper.py,sha256=
+clarifai/client/auth/helper.py,sha256=Ngw5IDkOWvnOz5YwViVk55z3mC52MyezLc0G3WxLqok,14643
 clarifai/client/auth/register.py,sha256=2CMdBsoVLoTfjyksE6j7BM2tiEc73WKYvxnwDDgNn1k,536
 clarifai/client/auth/stub.py,sha256=xy4-fV0W8keCgXld4eOVzFQEIKxOktNwtL5bLztReug,4940
 clarifai/constants/base.py,sha256=ogmFSZYoF0YhGjHg5aiOc3MLqPr_poKAls6xaD0_C3U,89
@@ -37,7 +41,7 @@ clarifai/datasets/upload/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 clarifai/datasets/upload/base.py,sha256=UIc0ufyIBCrb83_sFpv21L8FshsX4nwsLYQkdlJfzD4,2357
 clarifai/datasets/upload/features.py,sha256=jv2x7jGZKS-LMt87sEZNBwwOskHbP26XTMjoiaSA5pg,2024
 clarifai/datasets/upload/image.py,sha256=HlCsfEMu_C4GVecGSv52RUJ6laLW8H64Pfj_FQyX6qg,8580
-clarifai/datasets/upload/multimodal.py,sha256=
+clarifai/datasets/upload/multimodal.py,sha256=4jBFXgT44tPFHm3O3lYcnKM046qjUNJJaR0oBVTa3HM,2309
 clarifai/datasets/upload/text.py,sha256=boVJenfQZKf79aXu8CEP4g_ANzX5ROdd06g07O7RnXU,2198
 clarifai/datasets/upload/utils.py,sha256=BerWhq40ZUN30z6VImlc93eZtT-1vI18AMgSOuNzJEM,9647
 clarifai/datasets/upload/loaders/README.md,sha256=aNRutSCTzLp2ruIZx74ZkN5AxpzwKOxMa7OzabnKpwg,2980
@@ -58,15 +62,14 @@ clarifai/rag/rag.py,sha256=L10TcV9E0PF1aJ2Nn1z1x6WVoUoGxbKt20lQXg8ksqo,12594
 clarifai/rag/utils.py,sha256=yr1jAcbpws4vFGBqlAwPPE7v1DRba48g8gixLFw8OhQ,4070
 clarifai/runners/__init__.py,sha256=3vr4RVvN1IRy2SxJpyycAAvrUBbH-mXR7pqUmu4w36A,412
 clarifai/runners/server.py,sha256=CVLrv2DjzCvKVXcJ4SWvcFWUZq0bdlBmyEpfVlfgT2A,4902
-clarifai/runners/dockerfile_template/Dockerfile.
-clarifai/runners/dockerfile_template/Dockerfile.cuda.template,sha256=8uQp2sX_bIzgQk84FNlS19PwKH_l0Qi54xE7_NVxUTE,3314
+clarifai/runners/dockerfile_template/Dockerfile.template,sha256=-T38Rscpjot8WVuUTUq1_N0xz_gg653FOHV4XQYGG-U,1453
 clarifai/runners/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/runners/models/base_typed_model.py,sha256=OnAk08Lo2Y1fGiBc6JJ6UvJ8P435cTsikTNYDkStDpI,7790
 clarifai/runners/models/model_class.py,sha256=9JSPAr4U4K7xI0kSl-q0mHB06zknm2OR-8XIgBCto94,1611
 clarifai/runners/models/model_run_locally.py,sha256=xbNcD0TMRlk52cUjJH-qenlkeiwS4YcOeb8eYy7KAEI,6583
 clarifai/runners/models/model_runner.py,sha256=3vzoastQxkGRDK8T9aojDsLNBb9A3IiKm6YmbFrE9S0,6241
 clarifai/runners/models/model_servicer.py,sha256=X4715PVA5PBurRTYcwSEudg8fShGV6InAF4mmRlRcHg,2826
-clarifai/runners/models/model_upload.py,sha256=
+clarifai/runners/models/model_upload.py,sha256=ocd6vnm9Pms9AMwi7j5yTjIHQY6LS9yopTTX6uocJvE,20434
 clarifai/runners/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/runners/utils/data_handler.py,sha256=sxy9zlAgI6ETuxCQhUgEXAn2GCsaW1GxpK6GTaMne0g,6966
 clarifai/runners/utils/data_utils.py,sha256=R1iQ82TuQ9JwxCJk8yEB1Lyb0BYVhVbWJI9YDi1zGOs,318
@@ -75,7 +78,7 @@ clarifai/runners/utils/url_fetcher.py,sha256=-Hwjb1SURszn7zUVwi4Of0-nrksfZy-uqT4
 clarifai/schema/search.py,sha256=JjTi8ammJgZZ2OGl4K6tIA4zEJ1Fr2ASZARXavI1j5c,2448
 clarifai/urls/helper.py,sha256=tjoMGGHuWX68DUB0pk4MEjrmFsClUAQj2jmVEM_Sy78,4751
 clarifai/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-clarifai/utils/cli.py,sha256=
+clarifai/utils/cli.py,sha256=O6ukcQb05pFIgdQKWn0tL0AALAjT3U3DFRjd_GgPCvk,1918
 clarifai/utils/constants.py,sha256=MG_iHnSwNEyUZOpvsrTicNwaT4CIjmlK_Ixk_qqEX8g,142
 clarifai/utils/logging.py,sha256=rhutBRQJLtkNRz8IErNCgbIpvtl2fQ3D2otYcGqd3-Q,11565
 clarifai/utils/misc.py,sha256=ptjt1NtteDT0EhrPoyQ7mgWtvoAQ-XNncQaZvNHb0KI,2253
@@ -88,9 +91,9 @@ clarifai/workflows/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuF
 clarifai/workflows/export.py,sha256=vICRhIreqDSShxLKjHNM2JwzKsf1B4fdXB0ciMcA70k,1945
 clarifai/workflows/utils.py,sha256=nGeB_yjVgUO9kOeKTg4OBBaBz-AwXI3m-huSVj-9W18,1924
 clarifai/workflows/validate.py,sha256=yJq03MaJqi5AK3alKGJJBR89xmmjAQ31sVufJUiOqY8,2556
-clarifai-10.
-clarifai-10.
-clarifai-10.
-clarifai-10.
-clarifai-10.
-clarifai-10.
+clarifai-10.10.1.dist-info/LICENSE,sha256=mUqF_d12-qE2n41g7C5_sq-BMLOcj6CNN-jevr15YHU,555
+clarifai-10.10.1.dist-info/METADATA,sha256=01eG2EIX_sgN8tgNkHnAD782mje2gLrspqrAFUEmSic,19566
+clarifai-10.10.1.dist-info/WHEEL,sha256=R06PA3UVYHThwHvxuRWMqaGcr-PuniXahwjmQRFMEkY,91
+clarifai-10.10.1.dist-info/entry_points.txt,sha256=X9FZ4Z-i_r2Ud1RpZ9sNIFYuu_-9fogzCMCRUD9hyX0,51
+clarifai-10.10.1.dist-info/top_level.txt,sha256=wUMdCQGjkxaynZ6nZ9FAnvBUCgp5RJUVFSy2j-KYo0s,9
+clarifai-10.10.1.dist-info/RECORD,,
clarifai/runners/dockerfile_template/Dockerfile.cuda.template
REMOVED
@@ -1,83 +0,0 @@
-# Build a virtualenv containing necessary system libraries and Python packages
-# for users to install their own packages while also being distroless.
-# * Install python3-venv
-# * Install gcc libpython3-dev to compile C Python modules
-# * In the virtualenv: Update pip setuputils and wheel to support building new packages
-# * Export environment variables to use the virtualenv by default
-# * Create a non-root user with minimal privileges and use it
-ARG TARGET_PLATFORM=linux/amd64
-FROM --platform=$TARGET_PLATFORM public.ecr.aws/docker/library/python:${PYTHON_VERSION}-slim-bookworm as build
-
-ENV DEBIAN_FRONTEND=noninteractive
-RUN apt-get update && \
-  apt-get install --no-install-suggests --no-install-recommends --yes \
-    software-properties-common \
-    gcc \
-    libpython3-dev && \
-  python${PYTHON_VERSION} -m venv /venv && \
-  /venv/bin/pip install --disable-pip-version-check --upgrade pip setuptools wheel && \
-  apt-get clean && rm -rf /var/lib/apt/lists/*
-
-# Set environment variables to use virtualenv by default
-ENV VIRTUAL_ENV=/venv
-ENV PATH="$VIRTUAL_ENV/bin:$PATH"
-
-#############################
-# User specific requirements
-#############################
-COPY requirements.txt .
-
-# Install requirements and cleanup before leaving this line.
-# Note(zeiler): this could be in a future template as {{model_python_deps}}
-RUN python -m pip install -r requirements.txt && rm -rf /root/.cache
-
-# Install Clarifai SDK
-RUN python -m pip install clarifai
-
-#############################
-# Finally copy everything we built into a distroless image for runtime.
-######################>#######
-ARG TARGET_PLATFORM=linux/amd64
-FROM --platform=$TARGET_PLATFORM gcr.io/distroless/python3-debian12:latest
-# FROM --platform=$TARGET_PLATFORM gcr.io/distroless/python3-debian12:debug
-ARG PYTHON_VERSION=${PYTHON_VERSION}
-# needed to call pip directly
-COPY --from=build /bin/sh /bin/sh
-
-# virtual env
-COPY --from=build /venv /venv
-
-# We have to overwrite the python3 binary that the distroless image uses
-COPY --from=build /usr/local/bin/python${PYTHON_VERSION} /usr/bin/python3
-# And also copy in all the lib files for it.
-COPY --from=build /usr/local/lib/ /usr/lib/
-
-# Set environment variables to use virtualenv by default
-ENV VIRTUAL_ENV=/venv
-ENV PYTHONPATH=${PYTHONPATH}:${VIRTUAL_ENV}/lib/python${PYTHON_VERSION}/site-packages
-
-# These will be set by the templaing system.
-ENV CLARIFAI_PAT=${CLARIFAI_PAT}
-ENV CLARIFAI_USER_ID=${CLARIFAI_USER_ID}
-ENV CLARIFAI_RUNNER_ID=${CLARIFAI_RUNNER_ID}
-ENV CLARIFAI_NODEPOOL_ID=${CLARIFAI_NODEPOOL_ID}
-ENV CLARIFAI_COMPUTE_CLUSTER_ID=${CLARIFAI_COMPUTE_CLUSTER_ID}
-ENV CLARIFAI_API_BASE=${CLARIFAI_API_BASE}
-
-# Set the NUMBA cache dir to /tmp
-ENV NUMBA_CACHE_DIR=/tmp/numba_cache
-ENV HOME=/tmp
-
-# Set the working directory to /app
-WORKDIR /app
-
-# Copy the current folder into /app/model_dir that the SDK will expect.
-# Note(zeiler): would be nice to exclude checkpoints in case they were pre-downloaded.
-COPY . /app/model_dir/${name}
-
-# Add the model directory to the python path.
-ENV PYTHONPATH=${PYTHONPATH}:/app/model_dir/${name}
-
-# Finally run the clarifai entrypoint to start the runner loop and local dev server.
-# Note(zeiler): we may want to make this a clarifai CLI call.
-CMD ["-m", "clarifai.runners.server", "--model_path", "/app/model_dir/${name}"]
{clarifai-10.9.5.dist-info → clarifai-10.10.1.dist-info}/LICENSE
File without changes
{clarifai-10.9.5.dist-info → clarifai-10.10.1.dist-info}/entry_points.txt
File without changes
{clarifai-10.9.5.dist-info → clarifai-10.10.1.dist-info}/top_level.txt
File without changes