clarifai-11.6.4-py3-none-any.whl → clarifai-11.6.4rc1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clarifai/__init__.py +1 -1
- clarifai/__pycache__/__init__.cpython-311.pyc +0 -0
- clarifai/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/__pycache__/errors.cpython-311.pyc +0 -0
- clarifai/__pycache__/errors.cpython-39.pyc +0 -0
- clarifai/__pycache__/versions.cpython-311.pyc +0 -0
- clarifai/__pycache__/versions.cpython-39.pyc +0 -0
- clarifai/cli/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/cli/__pycache__/base.cpython-39.pyc +0 -0
- clarifai/cli/__pycache__/compute_cluster.cpython-39.pyc +0 -0
- clarifai/cli/__pycache__/deployment.cpython-39.pyc +0 -0
- clarifai/cli/__pycache__/model.cpython-39.pyc +0 -0
- clarifai/cli/__pycache__/nodepool.cpython-39.pyc +0 -0
- clarifai/cli/base.py +11 -27
- clarifai/cli/model.py +171 -41
- clarifai/cli/model_templates.py +243 -0
- clarifai/cli/pipeline_step_templates.py +64 -0
- clarifai/client/__pycache__/__init__.cpython-311.pyc +0 -0
- clarifai/client/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/client/__pycache__/app.cpython-311.pyc +0 -0
- clarifai/client/__pycache__/app.cpython-39.pyc +0 -0
- clarifai/client/__pycache__/base.cpython-311.pyc +0 -0
- clarifai/client/__pycache__/base.cpython-39.pyc +0 -0
- clarifai/client/__pycache__/compute_cluster.cpython-311.pyc +0 -0
- clarifai/client/__pycache__/dataset.cpython-311.pyc +0 -0
- clarifai/client/__pycache__/dataset.cpython-39.pyc +0 -0
- clarifai/client/__pycache__/deployment.cpython-311.pyc +0 -0
- clarifai/client/__pycache__/deployment.cpython-39.pyc +0 -0
- clarifai/client/__pycache__/input.cpython-311.pyc +0 -0
- clarifai/client/__pycache__/input.cpython-39.pyc +0 -0
- clarifai/client/__pycache__/lister.cpython-311.pyc +0 -0
- clarifai/client/__pycache__/lister.cpython-39.pyc +0 -0
- clarifai/client/__pycache__/model.cpython-311.pyc +0 -0
- clarifai/client/__pycache__/model.cpython-39.pyc +0 -0
- clarifai/client/__pycache__/model_client.cpython-311.pyc +0 -0
- clarifai/client/__pycache__/model_client.cpython-39.pyc +0 -0
- clarifai/client/__pycache__/module.cpython-311.pyc +0 -0
- clarifai/client/__pycache__/nodepool.cpython-311.pyc +0 -0
- clarifai/client/__pycache__/runner.cpython-311.pyc +0 -0
- clarifai/client/__pycache__/search.cpython-311.pyc +0 -0
- clarifai/client/__pycache__/user.cpython-311.pyc +0 -0
- clarifai/client/__pycache__/workflow.cpython-311.pyc +0 -0
- clarifai/client/auth/__pycache__/__init__.cpython-311.pyc +0 -0
- clarifai/client/auth/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/client/auth/__pycache__/helper.cpython-311.pyc +0 -0
- clarifai/client/auth/__pycache__/helper.cpython-39.pyc +0 -0
- clarifai/client/auth/__pycache__/register.cpython-311.pyc +0 -0
- clarifai/client/auth/__pycache__/register.cpython-39.pyc +0 -0
- clarifai/client/auth/__pycache__/stub.cpython-311.pyc +0 -0
- clarifai/client/auth/__pycache__/stub.cpython-39.pyc +0 -0
- clarifai/constants/__pycache__/base.cpython-311.pyc +0 -0
- clarifai/constants/__pycache__/base.cpython-39.pyc +0 -0
- clarifai/constants/__pycache__/dataset.cpython-311.pyc +0 -0
- clarifai/constants/__pycache__/dataset.cpython-39.pyc +0 -0
- clarifai/constants/__pycache__/input.cpython-311.pyc +0 -0
- clarifai/constants/__pycache__/input.cpython-39.pyc +0 -0
- clarifai/constants/__pycache__/model.cpython-311.pyc +0 -0
- clarifai/constants/__pycache__/model.cpython-39.pyc +0 -0
- clarifai/constants/__pycache__/search.cpython-311.pyc +0 -0
- clarifai/constants/__pycache__/workflow.cpython-311.pyc +0 -0
- clarifai/datasets/__pycache__/__init__.cpython-311.pyc +0 -0
- clarifai/datasets/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/datasets/export/__pycache__/__init__.cpython-311.pyc +0 -0
- clarifai/datasets/export/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/datasets/export/__pycache__/inputs_annotations.cpython-311.pyc +0 -0
- clarifai/datasets/export/__pycache__/inputs_annotations.cpython-39.pyc +0 -0
- clarifai/datasets/upload/__pycache__/__init__.cpython-311.pyc +0 -0
- clarifai/datasets/upload/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/datasets/upload/__pycache__/base.cpython-311.pyc +0 -0
- clarifai/datasets/upload/__pycache__/base.cpython-39.pyc +0 -0
- clarifai/datasets/upload/__pycache__/features.cpython-311.pyc +0 -0
- clarifai/datasets/upload/__pycache__/features.cpython-39.pyc +0 -0
- clarifai/datasets/upload/__pycache__/image.cpython-311.pyc +0 -0
- clarifai/datasets/upload/__pycache__/image.cpython-39.pyc +0 -0
- clarifai/datasets/upload/__pycache__/multimodal.cpython-311.pyc +0 -0
- clarifai/datasets/upload/__pycache__/multimodal.cpython-39.pyc +0 -0
- clarifai/datasets/upload/__pycache__/text.cpython-311.pyc +0 -0
- clarifai/datasets/upload/__pycache__/text.cpython-39.pyc +0 -0
- clarifai/datasets/upload/__pycache__/utils.cpython-311.pyc +0 -0
- clarifai/datasets/upload/__pycache__/utils.cpython-39.pyc +0 -0
- clarifai/models/model_serving/README.md +158 -0
- clarifai/models/model_serving/__init__.py +14 -0
- clarifai/models/model_serving/cli/__init__.py +12 -0
- clarifai/models/model_serving/cli/_utils.py +53 -0
- clarifai/models/model_serving/cli/base.py +14 -0
- clarifai/models/model_serving/cli/build.py +79 -0
- clarifai/models/model_serving/cli/clarifai_clis.py +33 -0
- clarifai/models/model_serving/cli/create.py +171 -0
- clarifai/models/model_serving/cli/example_cli.py +34 -0
- clarifai/models/model_serving/cli/login.py +26 -0
- clarifai/models/model_serving/cli/upload.py +179 -0
- clarifai/models/model_serving/constants.py +21 -0
- clarifai/models/model_serving/docs/cli.md +161 -0
- clarifai/models/model_serving/docs/concepts.md +229 -0
- clarifai/models/model_serving/docs/dependencies.md +11 -0
- clarifai/models/model_serving/docs/inference_parameters.md +139 -0
- clarifai/models/model_serving/docs/model_types.md +19 -0
- clarifai/models/model_serving/model_config/__init__.py +16 -0
- clarifai/models/model_serving/model_config/base.py +369 -0
- clarifai/models/model_serving/model_config/config.py +312 -0
- clarifai/models/model_serving/model_config/inference_parameter.py +129 -0
- clarifai/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +25 -0
- clarifai/models/model_serving/model_config/model_types_config/text-classifier.yaml +19 -0
- clarifai/models/model_serving/model_config/model_types_config/text-embedder.yaml +20 -0
- clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml +19 -0
- clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml +19 -0
- clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml +22 -0
- clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml +32 -0
- clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml +19 -0
- clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +19 -0
- clarifai/models/model_serving/model_config/output.py +133 -0
- clarifai/models/model_serving/model_config/triton/__init__.py +14 -0
- clarifai/models/model_serving/model_config/triton/serializer.py +136 -0
- clarifai/models/model_serving/model_config/triton/triton_config.py +182 -0
- clarifai/models/model_serving/model_config/triton/wrappers.py +281 -0
- clarifai/models/model_serving/repo_build/__init__.py +14 -0
- clarifai/models/model_serving/repo_build/build.py +198 -0
- clarifai/models/model_serving/repo_build/static_files/_requirements.txt +2 -0
- clarifai/models/model_serving/repo_build/static_files/base_test.py +169 -0
- clarifai/models/model_serving/repo_build/static_files/inference.py +26 -0
- clarifai/models/model_serving/repo_build/static_files/sample_clarifai_config.yaml +25 -0
- clarifai/models/model_serving/repo_build/static_files/test.py +40 -0
- clarifai/models/model_serving/repo_build/static_files/triton/model.py +75 -0
- clarifai/models/model_serving/utils.py +23 -0
- clarifai/runners/__pycache__/__init__.cpython-311.pyc +0 -0
- clarifai/runners/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/runners/models/__pycache__/__init__.cpython-311.pyc +0 -0
- clarifai/runners/models/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/runners/models/__pycache__/mcp_class.cpython-311.pyc +0 -0
- clarifai/runners/models/__pycache__/model_builder.cpython-311.pyc +0 -0
- clarifai/runners/models/__pycache__/model_builder.cpython-39.pyc +0 -0
- clarifai/runners/models/__pycache__/model_class.cpython-311.pyc +0 -0
- clarifai/runners/models/__pycache__/model_runner.cpython-311.pyc +0 -0
- clarifai/runners/models/__pycache__/openai_class.cpython-311.pyc +0 -0
- clarifai/runners/models/base_typed_model.py +238 -0
- clarifai/runners/models/model_upload.py +607 -0
- clarifai/runners/utils/__pycache__/__init__.cpython-311.pyc +0 -0
- clarifai/runners/utils/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/runners/utils/__pycache__/code_script.cpython-311.pyc +0 -0
- clarifai/runners/utils/__pycache__/code_script.cpython-39.pyc +0 -0
- clarifai/runners/utils/__pycache__/const.cpython-311.pyc +0 -0
- clarifai/runners/utils/__pycache__/data_utils.cpython-311.pyc +0 -0
- clarifai/runners/utils/__pycache__/data_utils.cpython-39.pyc +0 -0
- clarifai/runners/utils/__pycache__/loader.cpython-311.pyc +0 -0
- clarifai/runners/utils/__pycache__/method_signatures.cpython-311.pyc +0 -0
- clarifai/runners/utils/__pycache__/model_utils.cpython-311.pyc +0 -0
- clarifai/runners/utils/__pycache__/openai_convertor.cpython-311.pyc +0 -0
- clarifai/runners/utils/__pycache__/serializers.cpython-311.pyc +0 -0
- clarifai/runners/utils/__pycache__/url_fetcher.cpython-311.pyc +0 -0
- clarifai/runners/utils/data_handler.py +231 -0
- clarifai/runners/utils/data_types/__pycache__/__init__.cpython-311.pyc +0 -0
- clarifai/runners/utils/data_types/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/runners/utils/data_types/__pycache__/data_types.cpython-311.pyc +0 -0
- clarifai/runners/utils/data_types/__pycache__/data_types.cpython-39.pyc +0 -0
- clarifai/runners/utils/data_types.py +471 -0
- clarifai/runners/utils/temp.py +59 -0
- clarifai/schema/__pycache__/search.cpython-311.pyc +0 -0
- clarifai/urls/__pycache__/helper.cpython-311.pyc +0 -0
- clarifai/urls/__pycache__/helper.cpython-39.pyc +0 -0
- clarifai/utils/__pycache__/__init__.cpython-311.pyc +0 -0
- clarifai/utils/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/utils/__pycache__/cli.cpython-39.pyc +0 -0
- clarifai/utils/__pycache__/config.cpython-311.pyc +0 -0
- clarifai/utils/__pycache__/config.cpython-39.pyc +0 -0
- clarifai/utils/__pycache__/constants.cpython-311.pyc +0 -0
- clarifai/utils/__pycache__/constants.cpython-39.pyc +0 -0
- clarifai/utils/__pycache__/logging.cpython-311.pyc +0 -0
- clarifai/utils/__pycache__/logging.cpython-39.pyc +0 -0
- clarifai/utils/__pycache__/misc.cpython-311.pyc +0 -0
- clarifai/utils/__pycache__/misc.cpython-39.pyc +0 -0
- clarifai/utils/__pycache__/model_train.cpython-311.pyc +0 -0
- clarifai/utils/__pycache__/protobuf.cpython-311.pyc +0 -0
- clarifai/utils/__pycache__/protobuf.cpython-39.pyc +0 -0
- clarifai/utils/cli.py +14 -15
- clarifai/utils/constants.py +2 -0
- clarifai/utils/misc.py +381 -1
- clarifai/workflows/__pycache__/__init__.cpython-311.pyc +0 -0
- clarifai/workflows/__pycache__/export.cpython-311.pyc +0 -0
- clarifai/workflows/__pycache__/utils.cpython-311.pyc +0 -0
- clarifai/workflows/__pycache__/validate.cpython-311.pyc +0 -0
- {clarifai-11.6.4.dist-info → clarifai-11.6.4rc1.dist-info}/METADATA +1 -1
- clarifai-11.6.4rc1.dist-info/RECORD +301 -0
- {clarifai-11.6.4.dist-info → clarifai-11.6.4rc1.dist-info}/WHEEL +1 -1
- clarifai-11.6.4.dist-info/RECORD +0 -127
- {clarifai-11.6.4.dist-info → clarifai-11.6.4rc1.dist-info}/entry_points.txt +0 -0
- {clarifai-11.6.4.dist-info → clarifai-11.6.4rc1.dist-info}/licenses/LICENSE +0 -0
- {clarifai-11.6.4.dist-info → clarifai-11.6.4rc1.dist-info}/top_level.txt +0 -0
clarifai/__init__.py
CHANGED
@@ -1 +1 @@
-__version__ = "11.6.4"
+__version__ = "11.6.4rc1"
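Note that "rc1" is a PEP 440 pre-release segment, so despite the arrow in the title this one-line change points the package at a version that sorts before the 11.6.4 final release. A quick illustrative check with the packaging library (which implements PEP 440 ordering):

from packaging.version import Version

# Pre-releases (rc, a, b) order before the corresponding final release.
assert Version("11.6.4rc1") < Version("11.6.4")
assert Version("11.6.4rc1").is_prerelease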
clarifai/__pycache__/*.pyc, clarifai/cli/__pycache__/*.pyc (12 files)
Binary files; no textual diff.
clarifai/cli/base.py
CHANGED
@@ -159,7 +159,7 @@ def env(ctx_obj):
 @click.pass_context
 def login(ctx, api_url, user_id):
     """Login command to set PAT and other configurations."""
-    from clarifai.utils.cli import validate_pat_token
+    from clarifai.utils.cli import validate_context_auth
 
     name = input('context name (default: "default"): ')
     user_id = user_id if user_id is not None else input('user id: ')
@@ -168,17 +168,8 @@ def login(ctx, api_url, user_id):
         'ENVVAR',
     )
 
-    # Validate the
-
-    print("Validating PAT token...")
-    is_valid, error_message = validate_pat_token(pat, user_id, api_url)
-
-    if not is_valid:
-        print(f"❌ PAT token validation failed: {error_message}")
-        print("Please check your token and try again.")
-        return  # Exit without saving the configuration
-    else:
-        print("✓ PAT token is valid")
+    # Validate the Context Credentials
+    validate_context_auth(pat, user_id, api_url)
 
     context = Context(
         name,
@@ -194,7 +185,9 @@ def login(ctx, api_url, user_id):
     ctx.obj.current_context = context.name
 
     ctx.obj.to_yaml()
-
+    logger.info(
+        f"Login successful and Configuration saved successfully for context '{context.name}'"
+    )
 
 
 @cli.group(cls=AliasedGroup)
@@ -225,10 +218,10 @@ def create(
     pat=None,
 ):
     """Create a new context"""
-    from clarifai.utils.cli import validate_pat_token
+    from clarifai.utils.cli import validate_context_auth
 
     if name in ctx.obj.contexts:
-
+        logger.info(f'"{name}" context already exists')
         sys.exit(1)
     if not user_id:
         user_id = input('user id: ')
@@ -242,22 +235,13 @@ def create(
         'ENVVAR',
     )
 
-    # Validate the
-
-    print("Validating PAT token...")
-    is_valid, error_message = validate_pat_token(pat, user_id, base_url)
-
-    if not is_valid:
-        print(f"❌ PAT token validation failed: {error_message}")
-        print("Please check your token and try again.")
-        return  # Exit without saving the configuration
-    else:
-        print("✓ PAT token is valid")
+    # Validate the Context Credentials
+    validate_context_auth(pat, user_id, base_url)
 
     context = Context(name, CLARIFAI_USER_ID=user_id, CLARIFAI_API_BASE=base_url, CLARIFAI_PAT=pat)
     ctx.obj.contexts[context.name] = context
     ctx.obj.to_yaml()
-
+    logger.info(f"Context '{name}' created successfully")
 
 
 # write a click command to delete a context
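Both commands now delegate credential checking to validate_context_auth and log through logger, instead of printing and threading an (is_valid, error_message) tuple through each call site. The helper itself is not part of this diff; the sketch below is only a guess at its shape, assuming it performs the same authenticated round-trip the removed inline code did and exits on failure (the list_apps probe is illustrative, not confirmed):

# Hypothetical sketch of clarifai.utils.cli.validate_context_auth;
# the shipped implementation is not shown in this diff.
import sys

from clarifai.client.user import User


def validate_context_auth(pat: str, user_id: str, base_url: str) -> None:
    """Exit the CLI early if the PAT cannot authenticate against the API."""
    try:
        # Any cheap authenticated call works as a probe; a bad PAT or user id
        # surfaces here as an exception.
        next(iter(User(user_id=user_id, pat=pat, base_url=base_url).list_apps()), None)
    except Exception as e:
        print(f"PAT validation failed: {e}")
        sys.exit(1)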
clarifai/cli/model.py
CHANGED
@@ -4,7 +4,7 @@ import tempfile
 
 import click
 
-from clarifai.cli.base import cli
+from clarifai.cli.base import cli, pat_display
 from clarifai.utils.cli import validate_context
 from clarifai.utils.constants import (
     DEFAULT_LOCAL_RUNNER_APP_ID,
@@ -15,9 +15,54 @@ from clarifai.utils.constants import (
     DEFAULT_LOCAL_RUNNER_MODEL_TYPE,
     DEFAULT_LOCAL_RUNNER_NODEPOOL_CONFIG,
     DEFAULT_LOCAL_RUNNER_NODEPOOL_ID,
+    DEFAULT_OLLAMA_MODEL_REPO,
+    DEFAULT_OLLAMA_MODEL_REPO_BRANCH,
 )
 from clarifai.utils.logging import logger
-from clarifai.utils.misc import clone_github_repo, format_github_repo_url
+from clarifai.utils.misc import GitHubDownloader, clone_github_repo, format_github_repo_url
+
+
+def customize_ollama_model(model_path, model_name, port, context_length):
+    """Customize the Ollama model name in the cloned template files.
+    Args:
+        model_path: Path to the cloned model directory
+        model_name: The model name to set (e.g., 'llama3.1', 'mistral')
+
+    """
+    model_py_path = os.path.join(model_path, "1", "model.py")
+
+    if not os.path.exists(model_py_path):
+        logger.warning(f"Model file {model_py_path} not found, skipping model name customization")
+        return
+
+    try:
+        # Read the model.py file
+        with open(model_py_path, 'r') as file:
+            content = file.read()
+        if model_name:
+            # Replace the default model name in the load_model method
+            content = content.replace(
+                'self.model = os.environ.get("OLLAMA_MODEL_NAME", \'llama3.2\')',
+                f'self.model = os.environ.get("OLLAMA_MODEL_NAME", \'{model_name}\')',
+            )
+
+        if port:
+            # Replace the default port variable in the model.py file
+            content = content.replace("PORT = '23333'", f"PORT = '{port}'")
+
+        if context_length:
+            # Replace the default context length variable in the model.py file
+            content = content.replace(
+                "context_length = '8192'", f"context_length = '{context_length}'"
+            )
+
+        # Write the modified content back to model.py
+        with open(model_py_path, 'w') as file:
+            file.write(content)
+
+    except Exception as e:
+        logger.error(f"Failed to customize Ollama model name in {model_py_path}: {e}")
+        raise
 
 
 @cli.group(
@@ -48,21 +93,36 @@ def model():
     help='GitHub Personal Access Token for authentication when cloning private repositories.',
 )
 @click.option(
-    '--github-repo',
+    '--github-url',
     required=False,
     help='GitHub repository URL or "user/repo" format to clone a repository from. If provided, the entire repository contents will be copied to the target directory instead of using default templates.',
 )
 @click.option(
-    '--branch',
+    '--toolkit',
+    type=click.Choice(['ollama'], case_sensitive=False),
     required=False,
-    help='
+    help='Toolkit to use for model initialization. Currently supports "ollama".',
 )
 @click.option(
-    '--local-ollama-model',
-
-    help='
+    '--model-name',
+    required=False,
+    help='Model name to configure when using --toolkit. For ollama toolkit, this sets the Ollama model to use (e.g., "llama3.1", "mistral", etc.).',
+)
+@click.option(
+    '--port',
+    type=str,
+    help='Port to run the Ollama server on. Defaults to 23333.',
+    required=False,
+)
+@click.option(
+    '--context-length',
+    type=str,
+    help='Context length for the Ollama model. Defaults to 8192.',
+    required=False,
 )
-def init(model_path, model_type_id, github_pat, github_repo, branch, local_ollama_model):
+def init(
+    model_path, model_type_id, github_pat, github_url, toolkit, model_name, port, context_length
+):
     """Initialize a new model directory structure.
 
     Creates the following structure in the specified directory:
@@ -77,42 +137,99 @@ def init(model_path, model_type_id, github_pat, github_repo, branch, local_ollama_model):
     branch to clone from.
 
     MODEL_PATH: Path where to create the model directory structure. If not specified, the current directory is used by default.
+    model_type_id: Type of model to initialize. Options are 'mcp' for MCPModelClass or 'openai' for OpenAIModelClass.
+    github_pat: GitHub Personal Access Token for cloning private repositories.
+    github_url: GitHub repository URL or "user/repo" format to clone a repository from. If provided, the entire repository contents will be copied to the target directory instead of using default templates.
+    toolkit: Toolkit to use for model initialization. Currently supports 'ollama'.
+    model_name: Model name to configure when using --toolkit. For ollama toolkit, this sets the Ollama model to use (e.g., 'llama3.1', 'mistral', etc.).
+    port: Port to run the Ollama server on. Defaults to 23333.
+    context_length: Context length for the Ollama model. Defaults to 8192.
     """
-    # Handle the --local-ollama-model flag
-    if local_ollama_model:
-        if github_repo or branch:
-            raise click.ClickException(
-                "Cannot specify both --local-ollama-model and --github-repo/--branch"
-            )
-        github_repo = "https://github.com/Clarifai/runners-examples"
-        branch = "ollama"
-
     # Resolve the absolute path
     model_path = os.path.abspath(model_path)
 
     # Create the model directory if it doesn't exist
     os.makedirs(model_path, exist_ok=True)
 
-    #
-    if github_repo:
-        logger.info(f"Initializing model from GitHub repository: {github_repo}")
+    # Validate parameters
+    if port and not port.isdigit():
+        logger.error("Invalid value: --port must be a number")
+        raise click.Abort()
+
+    if context_length and not context_length.isdigit():
+        logger.error("Invalid value: --context-length must be a number")
+        raise click.Abort()
+
+    # Validate option combinations
+    if model_name and not (toolkit):
+        logger.error("--model-name can only be used with --toolkit")
+        raise click.Abort()
+
+    if toolkit and (github_url):
+        logger.error("Cannot specify both --toolkit and --github-repo")
+        raise click.Abort()
+
+    # --toolkit option
+    if toolkit == 'ollama':
+        github_url = DEFAULT_OLLAMA_MODEL_REPO
+        branch = DEFAULT_OLLAMA_MODEL_REPO_BRANCH
+
+    if github_url:
+        if not toolkit:
+            owner, repo, branch, folder_path = GitHubDownloader().parse_github_url(url=github_url)
+            logger.info(
+                f"Parsed GitHub repository: owner={owner}, repo={repo}, branch={branch}, folder_path={folder_path}"
+            )
+            if folder_path != "":
+                downloader = GitHubDownloader(
+                    max_retries=3,
+                    github_token=github_pat,
+                )
+                try:
+                    downloader.download_github_folder(
+                        url=github_url,
+                        output_dir=model_path,
+                        github_token=github_pat,
+                    )
+                    logger.info(f"Successfully downloaded folder contents to {model_path}")
+                    logger.info("Model initialization complete with GitHub folder download")
+                    return
+
+                except Exception as e:
+                    logger.error(f"Failed to download GitHub folder: {e}")
+                    # Continue with the rest of the initialization process
+                    github_url = None  # Fall back to template mode
+
+            elif branch and folder_path == "":
+                # When we have a branch but no specific folder path
+                logger.info(
+                    f"Initializing model from GitHub repository: {github_url} (branch: {branch})"
+                )
+
+                # Check if it's a local path or normalize the GitHub repo URL
+                if os.path.exists(github_url):
+                    repo_url = github_url
+                else:
+                    repo_url = format_github_repo_url(github_url)
+                    repo_url = f"https://github.com/{owner}/{repo}"
+
+        if toolkit:
+            logger.info(f"Initializing model from GitHub repository: {github_url}")
 
         # Check if it's a local path or normalize the GitHub repo URL
-        if os.path.exists(github_repo):
-            repo_url = github_repo
+        if os.path.exists(github_url):
+            repo_url = github_url
         else:
-            repo_url = format_github_repo_url(github_repo)
+            repo_url = format_github_repo_url(github_url)
 
+        try:
             # Create a temporary directory for cloning
-            with tempfile.TemporaryDirectory() as clone_dir:
-
-
-            # Clone the repository
+            with tempfile.TemporaryDirectory(prefix="clarifai_model_") as clone_dir:
+                # Clone the repository with explicit branch parameter
                 if not clone_github_repo(repo_url, clone_dir, github_pat, branch):
-                    logger.error(
-
-
-                    github_repo = None  # Fall back to template mode
+                    logger.error(f"Failed to clone repository from {repo_url}")
+                    github_url = None  # Fall back to template mode
+
                 else:
                     # Copy the entire repository content to target directory (excluding .git)
                     for item in os.listdir(clone_dir):
@@ -127,15 +244,21 @@ def init(model_path, model_type_id, github_pat, github_repo, branch, local_ollama_model):
                         else:
                             shutil.copy2(source_path, target_path)
 
-
-
-
-
-
-
+        except Exception as e:
+            logger.error(f"Failed to clone GitHub repository: {e}")
+            github_url = None
+
+        if (model_name or port or context_length) and (toolkit == 'ollama'):
+            customize_ollama_model(model_path, model_name, port, context_length)
+
+        logger.info("Model initialization complete with GitHub repository")
+        logger.info("Next steps:")
+        logger.info("1. Review the model configuration")
+        logger.info("2. Install any required dependencies manually")
+        logger.info("3. Test the model locally using 'clarifai model local-test'")
 
     # Fall back to template-based initialization if no GitHub repo or if GitHub repo failed
-    if not github_repo:
+    if not github_url:
         from clarifai.cli.templates.model_templates import (
             get_config_template,
             get_model_template,
@@ -473,9 +596,16 @@ def local_runner(ctx, model_path, pool_size):
     user_id = ctx.obj.current.user_id
     logger.info(f"Current user_id: {user_id}")
     if not user_id:
-
-
+        logger.error(f"User with ID '{user_id}' not found. Use 'clarifai login' to setup context.")
+        raise click.Abort()
+    pat = ctx.obj.current.pat
+    display_pat = pat_display(pat) if pat else ""
+    logger.info(f"Current PAT: {display_pat}")
+    if not pat:
+        logger.error(
+            "Personal Access Token (PAT) not found. Use 'clarifai login' to setup context."
         )
+        raise click.Abort()
     user = User(user_id=user_id, pat=ctx.obj.current.pat, base_url=ctx.obj.current.api_base)
     logger.debug("Checking if a local runner compute cluster exists...")
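With these changes, init resolves --toolkit ollama to the default Ollama template repository (DEFAULT_OLLAMA_MODEL_REPO) and then rewrites the cloned model.py through customize_ollama_model. A hypothetical invocation via click's test runner, with illustrative paths and values (running it for real clones a repository and writes into ./my-ollama-model):

from click.testing import CliRunner

from clarifai.cli.base import cli  # the group model.py registers its commands on

runner = CliRunner()
# --model-name, --port and --context-length are plain string replacements
# applied to the cloned template by customize_ollama_model().
result = runner.invoke(
    cli,
    [
        "model", "init", "./my-ollama-model",
        "--toolkit", "ollama",
        "--model-name", "llama3.1",
        "--port", "11434",
        "--context-length", "4096",
    ],
)
print(result.output)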
clarifai/cli/model_templates.py
ADDED
@@ -0,0 +1,243 @@
+"""Templates for model initialization."""
+
+from clarifai import __version__
+
+
+def get_model_class_template() -> str:
+    """Return the template for a basic ModelClass-based model."""
+    return '''from typing import Iterator, List
+from clarifai.runners.models.model_class import ModelClass
+from clarifai.runners.util.data_utils import Param
+
+class MyModel(ModelClass):
+    """A custom model implementation using ModelClass."""
+
+    def load_model(self):
+        """Load the model here.
+        # TODO: please fill in
+        # Add your model loading logic here
+        """
+        pass
+
+    @ModelClass.method
+    def predict(
+        self,
+        prompt: str = "",
+        chat_history: List[dict] = None,
+        max_tokens: int = Param(default=256, description="The maximum number of tokens to generate. Shorter token lengths will provide faster performance."),
+        temperature: float = Param(default=1.0, description="A decimal number that determines the degree of randomness in the response"),
+        top_p: float = Param(default=1.0, description="An alternative to sampling with temperature, where the model considers the results of the tokens with top_p probability mass."),
+    ) -> str:
+        """This is the method that will be called when the runner is run. It takes in an input and returns an output."""
+        # TODO: please fill in
+        # Implement your prediction logic here
+        pass  # Replace with your actual logic
+
+    @ModelClass.method
+    def generate(
+        self,
+        prompt: str = "",
+        chat_history: List[dict] = None,
+        max_tokens: int = Param(default=256, description="The maximum number of tokens to generate. Shorter token lengths will provide faster performance."),
+        temperature: float = Param(default=1.0, description="A decimal number that determines the degree of randomness in the response"),
+        top_p: float = Param(default=1.0, description="An alternative to sampling with temperature, where the model considers the results of the tokens with top_p probability mass."),
+    ) -> Iterator[str]:
+        """Example yielding a streamed response."""
+        # TODO: please fill in
+        # Implement your generation logic here
+        pass  # Replace with your actual logic
+'''
+
+
+def get_mcp_model_class_template() -> str:
+    """Return the template for an MCPModelClass-based model."""
+    return '''from typing import Any
+
+from fastmcp import FastMCP  # use fastmcp v2 not the built in mcp
+from pydantic import Field
+
+from clarifai.runners.models.mcp_class import MCPModelClass
+
+# TODO: please fill in
+# Configure your FastMCP server
+server = FastMCP("my-mcp-server", instructions="", stateless_http=True)
+
+
+# TODO: please fill in
+# Add your tools, resources, and prompts here
+@server.tool("example_tool", description="An example tool")
+def example_tool(input_param: Any = Field(description="Example input parameter")):
+    """Example tool implementation."""
+    # TODO: please fill in
+    # Implement your tool logic here
+    return f"Processed: {input_param}"
+
+
+# Static resource example
+@server.resource("config://version")
+def get_version():
+    """Example static resource."""
+    # TODO: please fill in
+    # Return your resource data
+    return "1.0.0"
+
+
+@server.prompt()
+def example_prompt(text: str) -> str:
+    """Example prompt template."""
+    # TODO: please fill in
+    # Define your prompt template
+    return f"Process this text: {text}"
+
+
+class MyModel(MCPModelClass):
+    """A custom model implementation using MCPModelClass."""
+
+    def get_server(self) -> FastMCP:
+        """Return the FastMCP server instance."""
+        return server
+'''
+
+
+def get_openai_model_class_template() -> str:
+    """Return the template for an OpenAIModelClass-based model."""
+    return '''from typing import List
+from openai import OpenAI
+from clarifai.runners.models.openai_class import OpenAIModelClass
+from clarifai.runners.util.data_utils import Param
+from clarifai.runners.utils.openai_convertor import build_openai_messages
+
+class MyModel(OpenAIModelClass):
+    """A custom model implementation using OpenAIModelClass."""
+
+    # TODO: please fill in
+    # Configure your OpenAI-compatible client for local model
+    client = OpenAI(
+        api_key="local-key",  # TODO: please fill in - use your local API key
+        base_url="http://localhost:8000/v1",  # TODO: please fill in - your local model server endpoint
+    )
+
+    # TODO: please fill in
+    # Specify the model name to use
+    model = "my-local-model"  # TODO: please fill in - replace with your local model name
+
+    def load_model(self):
+        """Optional: Add any additional model loading logic here."""
+        # TODO: please fill in (optional)
+        # Add any initialization logic if needed
+        pass
+
+    @OpenAIModelClass.method
+    def predict(
+        self,
+        prompt: str = "",
+        chat_history: List[dict] = None,
+        max_tokens: int = Param(default=256, description="The maximum number of tokens to generate. Shorter token lengths will provide faster performance."),
+        temperature: float = Param(default=1.0, description="A decimal number that determines the degree of randomness in the response"),
+        top_p: float = Param(default=1.0, description="An alternative to sampling with temperature, where the model considers the results of the tokens with top_p probability mass."),
+    ) -> str:
+        """Run a single prompt completion using the OpenAI client."""
+        # TODO: please fill in
+        # Implement your prediction logic here
+        messages = build_openai_messages(prompt, chat_history)
+        response = self.client.chat.completions.create(
+            model=self.model,
+            messages=messages,
+            max_completion_tokens=max_tokens,
+            temperature=temperature,
+            top_p=top_p,
+        )
+        return response.choices[0].message.content
+
+    @OpenAIModelClass.method
+    def generate(
+        self,
+        prompt: str = "",
+        chat_history: List[dict] = None,
+        max_tokens: int = Param(default=256, description="The maximum number of tokens to generate. Shorter token lengths will provide faster performance."),
+        temperature: float = Param(default=1.0, description="A decimal number that determines the degree of randomness in the response"),
+        top_p: float = Param(default=1.0, description="An alternative to sampling with temperature, where the model considers the results of the tokens with top_p probability mass."),
+    ):
+        """Stream a completion response using the OpenAI client."""
+        # TODO: please fill in
+        # Implement your streaming logic here
+        messages = build_openai_messages(prompt, chat_history)
+        stream = self.client.chat.completions.create(
+            model=self.model,
+            messages=messages,
+            max_completion_tokens=max_tokens,
+            temperature=temperature,
+            top_p=top_p,
+            stream=True,
+        )
+        for chunk in stream:
+            if chunk.choices:
+                text = (chunk.choices[0].delta.content
+                        if (chunk and chunk.choices[0].delta.content) is not None else '')
+                yield text
+'''
+
+
+def get_config_template(model_type_id: str = "text-to-text") -> str:
+    """Return the template for config.yaml."""
+    return f'''# Configuration file for your Clarifai model
+
+model:
+  id: "my-model"  # TODO: please fill in - replace with your model ID
+  user_id: "user_id"  # TODO: please fill in - replace with your user ID
+  app_id: "app_id"  # TODO: please fill in - replace with your app ID
+  model_type_id: "{model_type_id}"  # TODO: please fill in - replace if different model type ID
+
+build_info:
+  python_version: "3.12"
+
+# TODO: please fill in - adjust compute requirements for your model
+inference_compute_info:
+  cpu_limit: "1"  # TODO: please fill in - Amount of CPUs to use as a limit
+  cpu_memory: "1Gi"  # TODO: please fill in - Amount of CPU memory to use as a limit
+  cpu_requests: "0.5"  # TODO: please fill in - Amount of CPUs to use as a minimum
+  cpu_memory_requests: "512Mi"  # TODO: please fill in - Amount of CPU memory to use as a minimum
+  num_accelerators: 1  # TODO: please fill in - Amount of GPU/TPUs to use
+  accelerator_type: ["NVIDIA-*"]  # TODO: please fill in - type of accelerators requested
+  accelerator_memory: "1Gi"  # TODO: please fill in - Amount of accelerator/GPU memory to use as a minimum
+
+# TODO: please fill in (optional) - add checkpoints section if needed
+# checkpoints:
+#   type: "huggingface"  # supported type
+#   repo_id: "your-model-repo"  # for huggingface
+#   when: "build"  # or "runtime", "upload"
+'''
+
+
+def get_requirements_template(model_type_id: str = None) -> str:
+    """Return the template for requirements.txt."""
+    requirements = f'''# Clarifai SDK - required
+clarifai>={__version__}
+'''
+    if model_type_id == "mcp":
+        requirements += "fastmcp\n"
+    elif model_type_id == "openai":
+        requirements += "openai\n"
+    requirements += '''
+# TODO: please fill in - add your model's dependencies here
+# Examples:
+# torch>=2.0.0
+# transformers>=4.30.0
+# numpy>=1.21.0
+# pillow>=9.0.0
+'''
+    return requirements
+
+
+# Mapping of model type IDs to their corresponding templates
+MODEL_TYPE_TEMPLATES = {
+    "mcp": get_mcp_model_class_template,
+    "openai": get_openai_model_class_template,
+}
+
+
+def get_model_template(model_type_id: str = None) -> str:
+    """Get the appropriate model template based on model_type_id."""
+    if model_type_id in MODEL_TYPE_TEMPLATES:
+        return MODEL_TYPE_TEMPLATES[model_type_id]()
+    return get_model_class_template()
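MODEL_TYPE_TEMPLATES gives get_model_template a two-entry dispatch with a generic fallback, and get_requirements_template mirrors the same branching for dependencies. A quick sanity check (assuming the module imports at the path shown in the file listing; note that the init command above imports it as clarifai.cli.templates.model_templates):

from clarifai.cli.model_templates import (
    MODEL_TYPE_TEMPLATES,
    get_model_template,
    get_requirements_template,
)

# "mcp" and "openai" dispatch to their dedicated templates...
assert "MCPModelClass" in get_model_template("mcp")
assert "OpenAIModelClass" in get_model_template("openai")

# ...any other (or missing) type id falls back to the plain ModelClass scaffold.
assert "class MyModel(ModelClass):" in get_model_template("text-to-text")
assert set(MODEL_TYPE_TEMPLATES) == {"mcp", "openai"}

# requirements.txt gains the matching extra dependency per type.
assert "fastmcp" in get_requirements_template("mcp")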