clarifai 11.8.3.tar.gz → 11.8.5.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {clarifai-11.8.3/clarifai.egg-info → clarifai-11.8.5}/PKG-INFO +1 -1
 - clarifai-11.8.5/clarifai/__init__.py +1 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/cli/model.py +31 -11
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/cli/templates/model_templates.py +18 -11
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/client/model.py +29 -8
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/client/model_client.py +58 -19
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/utils/method_signatures.py +9 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/utils/cli.py +31 -7
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/utils/constants.py +1 -0
 - {clarifai-11.8.3 → clarifai-11.8.5/clarifai.egg-info}/PKG-INFO +1 -1
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai.egg-info/SOURCES.txt +2 -1
 - clarifai-11.8.5/tests/test_with_proto_feature.py +222 -0
 - clarifai-11.8.3/clarifai/__init__.py +0 -1
 - {clarifai-11.8.3 → clarifai-11.8.5}/LICENSE +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/MANIFEST.in +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/README.md +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/cli/README.md +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/cli/__init__.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/cli/__main__.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/cli/base.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/cli/compute_cluster.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/cli/deployment.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/cli/nodepool.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/cli/pipeline.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/cli/pipeline_step.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/cli/templates/__init__.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/cli/templates/pipeline_step_templates.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/cli/templates/pipeline_templates.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/cli.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/client/__init__.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/client/app.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/client/auth/__init__.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/client/auth/helper.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/client/auth/register.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/client/auth/stub.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/client/base.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/client/compute_cluster.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/client/dataset.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/client/deployment.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/client/input.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/client/lister.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/client/module.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/client/nodepool.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/client/pipeline.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/client/pipeline_step.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/client/runner.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/client/search.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/client/user.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/client/workflow.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/constants/base.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/constants/dataset.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/constants/input.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/constants/model.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/constants/rag.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/constants/search.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/constants/workflow.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/datasets/__init__.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/datasets/export/__init__.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/datasets/export/inputs_annotations.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/datasets/upload/__init__.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/datasets/upload/base.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/datasets/upload/features.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/datasets/upload/image.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/datasets/upload/loaders/README.md +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/datasets/upload/loaders/__init__.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/datasets/upload/loaders/coco_captions.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/datasets/upload/loaders/coco_detection.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/datasets/upload/loaders/imagenet_classification.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/datasets/upload/loaders/xview_detection.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/datasets/upload/multimodal.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/datasets/upload/text.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/datasets/upload/utils.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/errors.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/models/__init__.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/models/api.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/modules/README.md +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/modules/__init__.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/modules/css.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/modules/pages.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/modules/style.css +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/rag/__init__.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/rag/rag.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/rag/utils.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/__init__.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/dockerfile_template/Dockerfile.template +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/models/__init__.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/models/dummy_openai_model.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/models/mcp_class.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/models/model_builder.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/models/model_class.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/models/model_run_locally.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/models/model_runner.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/models/model_servicer.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/models/openai_class.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/models/visual_classifier_class.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/models/visual_detector_class.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/pipeline_steps/__init__.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/pipeline_steps/pipeline_step_builder.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/pipelines/__init__.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/pipelines/pipeline_builder.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/server.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/utils/__init__.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/utils/code_script.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/utils/const.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/utils/data_types/__init__.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/utils/data_types/data_types.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/utils/data_utils.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/utils/loader.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/utils/model_utils.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/utils/openai_convertor.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/utils/pipeline_validation.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/utils/serializers.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/runners/utils/url_fetcher.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/schema/search.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/urls/helper.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/utils/__init__.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/utils/config.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/utils/evaluation/__init__.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/utils/evaluation/helpers.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/utils/evaluation/main.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/utils/evaluation/testset_annotation_parser.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/utils/hashing.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/utils/logging.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/utils/misc.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/utils/model_train.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/utils/protobuf.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/utils/secrets.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/versions.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/workflows/__init__.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/workflows/export.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/workflows/utils.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai/workflows/validate.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai.egg-info/dependency_links.txt +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai.egg-info/entry_points.txt +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai.egg-info/requires.txt +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/clarifai.egg-info/top_level.txt +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/pyproject.toml +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/requirements.txt +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/setup.cfg +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/setup.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/tests/test_app.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/tests/test_async_stub.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/tests/test_auth.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/tests/test_data_upload.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/tests/test_eval.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/tests/test_list_models.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/tests/test_misc.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/tests/test_model_predict.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/tests/test_model_train.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/tests/test_modules.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/tests/test_pipeline_client.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/tests/test_rag.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/tests/test_search.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/tests/test_secrets.py +0 -0
 - {clarifai-11.8.3 → clarifai-11.8.5}/tests/test_stub.py +0 -0
 
clarifai-11.8.5/clarifai/__init__.py (new file; the listing above shows the 11.8.3 copy removed separately):

```diff
@@ -0,0 +1 @@
+__version__ = "11.8.5"
```

{clarifai-11.8.3 → clarifai-11.8.5}/clarifai/cli/model.py: `model init` now receives the click context to resolve the active `user_id`, gains a `python` toolkit choice, and prompts for a port when generating the OpenAI template. Here and in the file diffs below, lines that break off mid-token were truncated by the source diff viewer and are reproduced as found.

```diff
@@ -28,6 +28,7 @@ from clarifai.utils.constants import (
     DEFAULT_LOCAL_RUNNER_NODEPOOL_CONFIG,
     DEFAULT_LOCAL_RUNNER_NODEPOOL_ID,
     DEFAULT_OLLAMA_MODEL_REPO_BRANCH,
+    DEFAULT_PYTHON_MODEL_REPO_BRANCH,
     DEFAULT_TOOLKIT_MODEL_REPO,
     DEFAULT_VLLM_MODEL_REPO_BRANCH,
 )
@@ -74,9 +75,11 @@ def model():
 )
 @click.option(
     '--toolkit',
-    type=click.Choice(
+    type=click.Choice(
+        ['ollama', 'huggingface', 'lmstudio', 'vllm', 'python'], case_sensitive=False
+    ),
     required=False,
-    help='Toolkit to use for model initialization. Currently supports "ollama", "huggingface", "lmstudio" and "
+    help='Toolkit to use for model initialization. Currently supports "ollama", "huggingface", "lmstudio", "vllm" and "python".',
 )
 @click.option(
     '--model-name',
@@ -95,7 +98,9 @@ def model():
     help='Context length for the Ollama model. Defaults to 8192.',
     required=False,
 )
+@click.pass_context
 def init(
+    ctx,
     model_path,
     model_type_id,
     github_pat,
@@ -124,11 +129,13 @@ def init(
     MODEL_TYPE_ID: Type of model to create. If not specified, defaults to "text-to-text" for text models.\n
     GITHUB_PAT: GitHub Personal Access Token for authentication when cloning private repositories.\n
     GITHUB_URL: GitHub repository URL or "repo" format to clone a repository from. If provided, the entire repository contents will be copied to the target directory instead of using default templates.\n
-    TOOLKIT: Toolkit to use for model initialization. Currently supports "ollama", "huggingface", "lmstudio" and "
+    TOOLKIT: Toolkit to use for model initialization. Currently supports "ollama", "huggingface", "lmstudio", "vllm" and "python".\n
     MODEL_NAME: Model name to configure when using --toolkit. For ollama toolkit, this sets the Ollama model to use (e.g., "llama3.1", "mistral", etc.). For vllm & huggingface toolkit, this sets the Hugging Face model repo_id (e.g., "Qwen/Qwen3-4B-Instruct-2507"). For lmstudio toolkit, this sets the LM Studio model name (e.g., "qwen/qwen3-4b-thinking-2507").\n
     PORT: Port to run the (Ollama/lmstudio) server on. Defaults to 23333.\n
     CONTEXT_LENGTH: Context length for the (Ollama/lmstudio) model. Defaults to 8192.\n
     """
+    validate_context(ctx)
+    user_id = ctx.obj.current.user_id
     # Resolve the absolute path
     model_path = os.path.abspath(model_path)

@@ -176,6 +183,9 @@ def init(
     elif toolkit == 'vllm':
         github_url = DEFAULT_TOOLKIT_MODEL_REPO
         branch = DEFAULT_VLLM_MODEL_REPO_BRANCH
+    elif toolkit == 'python':
+        github_url = DEFAULT_TOOLKIT_MODEL_REPO
+        branch = DEFAULT_PYTHON_MODEL_REPO_BRANCH

     if github_url:
         downloader = GitHubDownloader(
@@ -304,15 +314,15 @@ def init(
             logger.error(f"Failed to clone GitHub repository: {e}")
             github_url = None

-    if (model_name or port or context_length) and (toolkit == 'ollama'):
-        customize_ollama_model(model_path, model_name, port, context_length)
+    if (user_id or model_name or port or context_length) and (toolkit == 'ollama'):
+        customize_ollama_model(model_path, user_id, model_name, port, context_length)

-    if (model_name or port or context_length) and (toolkit == 'lmstudio'):
-        customize_lmstudio_model(model_path, model_name, port, context_length)
+    if (user_id or model_name or port or context_length) and (toolkit == 'lmstudio'):
+        customize_lmstudio_model(model_path, user_id, model_name, port, context_length)

-    if model_name and (toolkit == 'huggingface' or toolkit == 'vllm'):
+    if (user_id or model_name) and (toolkit == 'huggingface' or toolkit == 'vllm'):
         # Update the config.yaml file with the provided model name
-        customize_huggingface_model(model_path, model_name)
+        customize_huggingface_model(model_path, user_id, model_name)

     if github_url:
         logger.info("Model initialization complete with GitHub repository")
@@ -326,12 +336,20 @@ def init(
         logger.info("Initializing model with default templates...")
         input("Press Enter to continue...")

+        from clarifai.cli.base import input_or_default
         from clarifai.cli.templates.model_templates import (
             get_config_template,
             get_model_template,
             get_requirements_template,
         )

+        # Collect additional parameters for OpenAI template
+        template_kwargs = {}
+        if model_type_id == "openai":
+            logger.info("Configuring OpenAI local runner...")
+            port = input_or_default("Enter port (default: 8000): ", "8000")
+            template_kwargs = {"port": port}
+
         # Create the 1/ subdirectory
         model_version_dir = os.path.join(model_path, "1")
         os.makedirs(model_version_dir, exist_ok=True)
@@ -341,7 +359,7 @@ def init(
         if os.path.exists(model_py_path):
             logger.warning(f"File {model_py_path} already exists, skipping...")
         else:
-            model_template = get_model_template(model_type_id)
+            model_template = get_model_template(model_type_id, **template_kwargs)
             with open(model_py_path, 'w') as f:
                 f.write(model_template)
             logger.info(f"Created {model_py_path}")
@@ -363,7 +381,9 @@ def init(
     else:
         config_model_type_id = DEFAULT_LOCAL_RUNNER_MODEL_TYPE  # default

-        config_template = get_config_template(
+        config_template = get_config_template(
+            user_id=user_id, model_type_id=config_model_type_id
+        )
         with open(config_path, 'w') as f:
             f.write(config_template)
         logger.info(f"Created {config_path}")
```

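The context-passing change above is the heart of this file's diff: `init` now takes the click context, validates it, and pulls the active `user_id` from the configured CLI context so generated files are pre-filled. A minimal, self-contained sketch of that pattern; the `_Config`/`_Current` objects and this `validate_context` are simplified stand-ins for clarifai's real context machinery, not the library's actual classes:

```python
import click


class _Current:
    """Stand-in for the active context entry in clarifai's CLI config."""

    user_id = "my-user"  # hypothetical value, normally read from the saved context


class _Config:
    """Stand-in for the object clarifai stores on ctx.obj."""

    current = _Current()


def validate_context(ctx):
    # The real helper in clarifai's CLI utilities errors out when no context
    # is configured; this stand-in only checks that ctx.obj was populated.
    if ctx.obj is None:
        raise click.UsageError("No Clarifai context configured.")


@click.command()
@click.pass_context
def init(ctx):
    validate_context(ctx)
    user_id = ctx.obj.current.user_id  # forwarded into the generated config.yaml
    click.echo(f"Initializing model for user {user_id}")


if __name__ == "__main__":
    init(obj=_Config())  # click exposes the obj= keyword as ctx.obj
```
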
{clarifai-11.8.3 → clarifai-11.8.5}/clarifai/cli/templates/model_templates.py: the OpenAI template takes a `port` parameter and discovers the served model automatically, `get_config_template` accepts `user_id`, and `get_model_template` forwards keyword arguments to template functions that declare parameters.

```diff
@@ -99,9 +99,9 @@ class MyModel(MCPModelClass):
 '''


-def get_openai_model_class_template() -> str:
+def get_openai_model_class_template(port: str = "8000") -> str:
     """Return the template for an OpenAIModelClass-based model."""
-    return '''from typing import List
+    return f'''from typing import List, Iterator
 from openai import OpenAI
 from clarifai.runners.models.openai_class import OpenAIModelClass
 from clarifai.runners.utils.data_utils import Param
@@ -114,12 +114,11 @@ class MyModel(OpenAIModelClass):
     # Configure your OpenAI-compatible client for local model
     client = OpenAI(
         api_key="local-key",  # TODO: please fill in - use your local API key
-        base_url="http://localhost:
+        base_url="http://localhost:{port}/v1",  # TODO: please fill in - your local model server endpoint
     )

-    #
-
-    model = "my-local-model"  # TODO: please fill in - replace with your local model name
+    # Automatically get the first available model
+    model = client.models.list().data[0].id

     def load_model(self):
         """Optional: Add any additional model loading logic here."""
@@ -157,7 +156,7 @@ class MyModel(OpenAIModelClass):
         max_tokens: int = Param(default=256, description="The maximum number of tokens to generate. Shorter token lengths will provide faster performance."),
         temperature: float = Param(default=1.0, description="A decimal number that determines the degree of randomness in the response"),
         top_p: float = Param(default=1.0, description="An alternative to sampling with temperature, where the model considers the results of the tokens with top_p probability mass."),
-    ):
+    ) -> Iterator[str]:
         """Stream a completion response using the OpenAI client."""
         # TODO: please fill in
         # Implement your streaming logic here
@@ -178,13 +177,13 @@ class MyModel(OpenAIModelClass):
 '''


-def get_config_template(model_type_id: str = "any-to-any") -> str:
+def get_config_template(user_id: str = None, model_type_id: str = "any-to-any") -> str:
     """Return the template for config.yaml."""
     return f'''# Configuration file for your Clarifai model

 model:
   id: "my-model"  # TODO: please fill in - replace with your model ID
-  user_id: "user_id"  # TODO: please fill in - replace with your user ID
+  user_id: "{user_id}"  # TODO: please fill in - replace with your user ID
   app_id: "app_id"  # TODO: please fill in - replace with your app ID
   model_type_id: "{model_type_id}"  # TODO: please fill in - replace if different model type ID

@@ -237,8 +236,16 @@ MODEL_TYPE_TEMPLATES = {
 }


-def get_model_template(model_type_id: str = None) -> str:
+def get_model_template(model_type_id: str = None, **kwargs) -> str:
     """Get the appropriate model template based on model_type_id."""
     if model_type_id in MODEL_TYPE_TEMPLATES:
-
+        template_func = MODEL_TYPE_TEMPLATES[model_type_id]
+        # Check if the template function accepts additional parameters
+        import inspect
+
+        sig = inspect.signature(template_func)
+        if len(sig.parameters) > 0:
+            return template_func(**kwargs)
+        else:
+            return template_func()
     return get_model_class_template()
```

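The `get_model_template` change is a small capability-based dispatch: template functions that declare parameters (like the OpenAI template's `port`) receive the kwargs collected by the CLI, while zero-argument templates are called as before. A runnable sketch of the same logic, with simplified stand-in templates in place of the real ones:

```python
import inspect


def openai_template(port: str = "8000") -> str:
    # Stand-in for get_openai_model_class_template(port=...)
    return f"base_url = 'http://localhost:{port}/v1'"


def default_template() -> str:
    # Stand-in for a zero-argument template function
    return "class MyModel: ..."


MODEL_TYPE_TEMPLATES = {"openai": openai_template, "any-to-any": default_template}


def get_model_template(model_type_id: str, **kwargs) -> str:
    template_func = MODEL_TYPE_TEMPLATES[model_type_id]
    # Only forward kwargs when the template function can accept them
    if len(inspect.signature(template_func).parameters) > 0:
        return template_func(**kwargs)
    return template_func()


print(get_model_template("openai", port="9000"))  # base_url = 'http://localhost:9000/v1'
print(get_model_template("any-to-any"))           # class MyModel: ...
```

Inspecting the signature keeps the template registry backward compatible: existing zero-argument template functions need no changes to live alongside parameterized ones.
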
{clarifai-11.8.3 → clarifai-11.8.5}/clarifai/client/model.py: adds a lazily constructed `async_client` property wired to the async stub and routes the async predict/generate/stream paths through it.

```diff
@@ -108,6 +108,7 @@ class Model(Lister, BaseClient):
         self.training_params = {}
         self.input_types = None
         self._client = None
+        self._async_client = None
         self._added_methods = False
         BaseClient.__init__(
             self,
@@ -528,6 +529,23 @@ class Model(Lister, BaseClient):
             )
         return self._client

+    @property
+    def async_client(self):
+        """Get the asynchronous client instance (with async stub)."""
+        if self._async_client is None:
+            request_template = service_pb2.PostModelOutputsRequest(
+                user_app_id=self.user_app_id,
+                model_id=self.id,
+                version_id=self.model_version.id,
+                model=self.model_info,
+                runner_selector=self._runner_selector,
+            )
+            # Create async client with async stub
+            self._async_client = ModelClient(
+                stub=self.STUB, async_stub=self.async_stub, request_template=request_template
+            )
+        return self._async_client
+
     def predict(self, *args, **kwargs):
         """
         Calls the model's predict() method with the given arguments.
@@ -574,16 +592,16 @@ class Model(Lister, BaseClient):
             )
             inference_params = kwargs.get('inference_params', {})
             output_config = kwargs.get('output_config', {})
-            return await self.
+            return await self.async_client._async_predict_by_proto(
                 inputs=inputs, inference_params=inference_params, output_config=output_config
             )

         # Adding try-except, since the await works differently with jupyter kernels and in regular python scripts.
         try:
-            return await self.
+            return await self.async_client.predict(*args, **kwargs)
         except TypeError:
             # In jupyter, it returns a str object instead of a co-routine.
-            return self.
+            return self.async_client.predict(*args, **kwargs)

     def __getattr__(self, name):
         try:
@@ -596,7 +614,10 @@ class Model(Lister, BaseClient):
             self.client.fetch()
             for method_name in self.client._method_signatures.keys():
                 if not hasattr(self, method_name):
-
+                    if method_name.startswith('async_'):
+                        setattr(self, method_name, getattr(self.async_client, method_name))
+                    else:
+                        setattr(self, method_name, getattr(self.client, method_name))
         if hasattr(self.client, name):
             return getattr(self.client, name)
         raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'")
@@ -839,11 +860,11 @@ class Model(Lister, BaseClient):
             )
             inference_params = kwargs.get('inference_params', {})
             output_config = kwargs.get('output_config', {})
-            return self.
+            return self.async_client._async_generate_by_proto(
                 inputs=inputs, inference_params=inference_params, output_config=output_config
             )

-        return self.
+        return self.async_client.generate(*args, **kwargs)

     def generate_by_filepath(
         self,
@@ -1048,11 +1069,11 @@ class Model(Lister, BaseClient):
                 )
                 inference_params = kwargs.get('inference_params', {})
                 output_config = kwargs.get('output_config', {})
-                return self.
+                return self.async_client._async_stream_by_proto(
                     inputs=inputs, inference_params=inference_params, output_config=output_config
                 )

-            return self.
+            return self.async_client.async_stream(*args, **kwargs)

     def stream_by_filepath(
         self,
```

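With the new property, async prediction no longer shares the sync client: the first access to `async_client` builds a second `ModelClient` wired to both stubs, and `__getattr__` binds any `async_`-prefixed method signatures to it. A hedged usage sketch of the path shown in the diff; the model URL is a placeholder, the method arguments depend on the deployed model's signatures, and a `CLARIFAI_PAT` credential is assumed to be set in the environment:

```python
import asyncio

from clarifai.client.model import Model


async def main():
    # Placeholder URL; assumes CLARIFAI_PAT is set in the environment.
    model = Model(url="https://clarifai.com/<user>/<app>/models/<model>")
    # The async path shown above: bound methods on async_client are awaitable,
    # mirroring the sync client's fetched method signatures.
    result = await model.async_client.predict(prompt="Hello")  # arguments depend on the model
    print(result)


asyncio.run(main())
```
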
         @@ -11,6 +11,7 @@ from clarifai.constants.model import MAX_MODEL_PREDICT_INPUTS 
     | 
|
| 
       11 
11 
     | 
    
         
             
            from clarifai.errors import UserError
         
     | 
| 
       12 
12 
     | 
    
         
             
            from clarifai.runners.utils import code_script, method_signatures
         
     | 
| 
       13 
13 
     | 
    
         
             
            from clarifai.runners.utils.method_signatures import (
         
     | 
| 
      
 14 
     | 
    
         
            +
                RESERVED_PARAM_WITH_PROTO,
         
     | 
| 
       14 
15 
     | 
    
         
             
                CompatibilitySerializer,
         
     | 
| 
       15 
16 
     | 
    
         
             
                deserialize,
         
     | 
| 
       16 
17 
     | 
    
         
             
                get_stream_from_signature,
         
     | 
| 
         @@ -204,6 +205,9 @@ class ModelClient: 
     | 
|
| 
       204 
205 
     | 
    
         | 
| 
       205 
206 
     | 
    
         
             
                        def bind_f(method_name, method_argnames, call_func, async_call_func):
         
     | 
| 
       206 
207 
     | 
    
         
             
                            def sync_f(*args, **kwargs):
         
     | 
| 
      
 208 
     | 
    
         
            +
                                # Extract with_proto parameter if present
         
     | 
| 
      
 209 
     | 
    
         
            +
                                with_proto = kwargs.pop(RESERVED_PARAM_WITH_PROTO, False)
         
     | 
| 
      
 210 
     | 
    
         
            +
             
     | 
| 
       207 
211 
     | 
    
         
             
                                if len(args) > len(method_argnames):
         
     | 
| 
       208 
212 
     | 
    
         
             
                                    raise TypeError(
         
     | 
| 
       209 
213 
     | 
    
         
             
                                        f"{method_name}() takes {len(method_argnames)} positional arguments but {len(args)} were given"
         
     | 
| 
         @@ -221,7 +225,7 @@ class ModelClient: 
     | 
|
| 
       221 
225 
     | 
    
         
             
                                    )
         
     | 
| 
       222 
226 
     | 
    
         
             
                                    if is_batch_input_valid and (not is_openai_chat_format(batch_inputs)):
         
     | 
| 
       223 
227 
     | 
    
         
             
                                        # If the batch input is valid, call the function with the batch inputs and the method name
         
     | 
| 
       224 
     | 
    
         
            -
                                        return call_func(batch_inputs, method_name)
         
     | 
| 
      
 228 
     | 
    
         
            +
                                        return call_func(batch_inputs, method_name, with_proto=with_proto)
         
     | 
| 
       225 
229 
     | 
    
         | 
| 
       226 
230 
     | 
    
         
             
                                for name, arg in zip(
         
     | 
| 
       227 
231 
     | 
    
         
             
                                    method_argnames, args
         
     | 
| 
         @@ -229,10 +233,13 @@ class ModelClient: 
     | 
|
| 
       229 
233 
     | 
    
         
             
                                    if name in kwargs:
         
     | 
| 
       230 
234 
     | 
    
         
             
                                        raise TypeError(f"Multiple values for argument {name}")
         
     | 
| 
       231 
235 
     | 
    
         
             
                                    kwargs[name] = arg
         
     | 
| 
       232 
     | 
    
         
            -
                                return call_func(kwargs, method_name)
         
     | 
| 
      
 236 
     | 
    
         
            +
                                return call_func(kwargs, method_name, with_proto=with_proto)
         
     | 
| 
       233 
237 
     | 
    
         | 
| 
       234 
238 
     | 
    
         
             
                            async def async_f(*args, **kwargs):
         
     | 
| 
       235 
239 
     | 
    
         
             
                                # Async version to call the async function
         
     | 
| 
      
 240 
     | 
    
         
            +
                                # Extract with_proto parameter if present
         
     | 
| 
      
 241 
     | 
    
         
            +
                                with_proto = kwargs.pop(RESERVED_PARAM_WITH_PROTO, False)
         
     | 
| 
      
 242 
     | 
    
         
            +
             
     | 
| 
       236 
243 
     | 
    
         
             
                                if len(args) > len(method_argnames):
         
     | 
| 
       237 
244 
     | 
    
         
             
                                    raise TypeError(
         
     | 
| 
       238 
245 
     | 
    
         
             
                                        f"{method_name}() takes {len(method_argnames)} positional arguments but {len(args)} were given"
         
     | 
| 
         @@ -249,7 +256,9 @@ class ModelClient: 
     | 
|
| 
       249 
256 
     | 
    
         
             
                                    )
         
     | 
| 
       250 
257 
     | 
    
         
             
                                    if is_batch_input_valid and (not is_openai_chat_format(batch_inputs)):
         
     | 
| 
       251 
258 
     | 
    
         
             
                                        # If the batch input is valid, call the function with the batch inputs and the method name
         
     | 
| 
       252 
     | 
    
         
            -
                                        return async_call_func( 
     | 
| 
      
 259 
     | 
    
         
            +
                                        return async_call_func(
         
     | 
| 
      
 260 
     | 
    
         
            +
                                            batch_inputs, method_name, with_proto=with_proto
         
     | 
| 
      
 261 
     | 
    
         
            +
                                        )
         
     | 
| 
       253 
262 
     | 
    
         | 
| 
       254 
263 
     | 
    
         
             
                                for name, arg in zip(
         
     | 
| 
       255 
264 
     | 
    
         
             
                                    method_argnames, args
         
     | 
| 
         @@ -258,7 +267,7 @@ class ModelClient: 
     | 
|
| 
       258 
267 
     | 
    
         
             
                                        raise TypeError(f"Multiple values for argument {name}")
         
     | 
| 
       259 
268 
     | 
    
         
             
                                    kwargs[name] = arg
         
     | 
| 
       260 
269 
     | 
    
         | 
| 
       261 
     | 
    
         
            -
                                return async_call_func(kwargs, method_name)
         
     | 
| 
      
 270 
     | 
    
         
            +
                                return async_call_func(kwargs, method_name, with_proto=with_proto)
         
     | 
| 
       262 
271 
     | 
    
         | 
| 
       263 
272 
     | 
    
         
             
                            class MethodWrapper:
         
     | 
| 
       264 
273 
     | 
    
         
             
                                def __call__(self, *args, **kwargs):
         
     | 
| 
```diff
@@ -364,6 +373,7 @@ class ModelClient:
         self,
         inputs,  # TODO set up functions according to fetched signatures?
         method_name: str = 'predict',
+        with_proto: bool = False,
     ) -> Any:
         input_signature = self._method_signatures[method_name].input_fields
         output_signature = self._method_signatures[method_name].output_fields
@@ -385,9 +395,12 @@ class ModelClient:
         outputs = []
         for output in response.outputs:
             outputs.append(deserialize(output.data, output_signature, is_output=True))
-
-
-
+
+        result = outputs if batch_input else outputs[0]
+
+        if with_proto:
+            return result, response
+        return result

     def _predict_by_proto(
         self,
```
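A minimal client-side sketch of the new `with_proto` flag. The model URL, PAT, and the `prompt` keyword below are illustrative placeholders, not part of this diff:

```python
from clarifai.client.model import Model

# Hypothetical model URL and PAT; substitute your own.
model = Model(url="https://clarifai.com/<user>/<app>/models/<model>", pat="YOUR_PAT")

# Default behaviour is unchanged: only the deserialized result comes back.
text = model.predict(prompt="Hello!")

# With with_proto=True the call returns a (result, response) tuple,
# where `response` is the raw protobuf returned by the API.
text, response = model.predict(prompt="Hello!", with_proto=True)
```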
```diff
@@ -448,15 +461,17 @@ class ModelClient:
         self,
         inputs,
         method_name: str = 'predict',
+        with_proto: bool = False,
     ) -> Any:
         """Asynchronously process inputs and make predictions.

         Args:
             inputs: Input data to process
             method_name (str): Name of the method to call
+            with_proto (bool): If True, return both the processed result and the raw protobuf response

         Returns:
-            Processed prediction results
+            Processed prediction results, optionally with protobuf response
         """
         # method_name is set to 'predict' by default, this is because to replicate the input and output signature behaviour of sync to async predict.
         input_signature = self._method_signatures[method_name].input_fields
@@ -477,7 +492,11 @@ class ModelClient:
         for output in response.outputs:
             outputs.append(deserialize(output.data, output_signature, is_output=True))

-
+        result = outputs if batch_input else outputs[0]
+
+        if with_proto:
+            return result, response
+        return result

     async def _async_predict_by_proto(
         self,
```
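The second tuple element is the untouched gRPC response, so request-level metadata that deserialization drops stays reachable. A sketch, reusing the `model` object from the previous example:

```python
from clarifai_grpc.grpc.api.status import status_code_pb2

_, response = model.predict(prompt="Hello!", with_proto=True)

# Status and per-output metadata live only on the protobuf.
assert response.status.code == status_code_pb2.SUCCESS
print(response.outputs[0].id)    # server-assigned output ID
print(response.outputs[0].data)  # raw Data proto, before deserialization
```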
| 
```diff
@@ -551,6 +570,7 @@ class ModelClient:
         self,
         inputs,  # TODO set up functions according to fetched signatures?
         method_name: str = 'generate',
+        with_proto: bool = False,
     ) -> Any:
         input_signature = self._method_signatures[method_name].input_fields
         output_signature = self._method_signatures[method_name].output_fields
@@ -572,10 +592,13 @@ class ModelClient:
             outputs = []
             for output in response.outputs:
                 outputs.append(deserialize(output.data, output_signature, is_output=True))
-
-
+
+            result = outputs if batch_input else outputs[0]
+
+            if with_proto:
+                yield result, response
             else:
-                yield
+                yield result

     def _generate_by_proto(
         self,
```
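Generate is a streaming method, so with `with_proto=True` each yielded item becomes a `(result, response)` pair, one per stream message. A hedged sketch with an illustrative `prompt` argument:

```python
for chunk, response in model.generate(prompt="Tell me a story", with_proto=True):
    # `response` is the protobuf for this stream message only.
    print(chunk, end="")
```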
```diff
@@ -641,6 +664,7 @@ class ModelClient:
         self,
         inputs,
         method_name: str = 'generate',
+        with_proto: bool = False,
     ) -> Any:
         # method_name is set to 'generate' by default, this is because to replicate the input and output signature behaviour of sync to async generate.
         input_signature = self._method_signatures[method_name].input_fields
@@ -654,18 +678,21 @@ class ModelClient:
         proto_inputs = []
         for input in inputs:
             proto = resources_pb2.Input()
-
-
+            serialize(input, input_signature, proto.data)
+            proto_inputs.append(proto)
         response_stream = self._async_generate_by_proto(proto_inputs, method_name)

         async for response in response_stream:
             outputs = []
             for output in response.outputs:
                 outputs.append(deserialize(output.data, output_signature, is_output=True))
-
-
+
+            result = outputs if batch_input else outputs[0]
+
+            if with_proto:
+                yield result, response
             else:
-                yield
+                yield result

     async def _async_generate_by_proto(
         self,
```
```diff
@@ -734,6 +761,7 @@ class ModelClient:
         self,
         inputs,
         method_name: str = 'stream',
+        with_proto: bool = False,
     ) -> Any:
         input_signature = self._method_signatures[method_name].input_fields
         output_signature = self._method_signatures[method_name].output_fields
@@ -775,7 +803,12 @@ class ModelClient:

         for response in response_stream:
             assert len(response.outputs) == 1, 'streaming methods must have exactly one output'
-
+            result = deserialize(response.outputs[0].data, output_signature, is_output=True)
+
+            if with_proto:
+                yield result, response
+            else:
+                yield result

     def _req_iterator(
         self,
```
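The stream variants follow the same pattern; because streaming replies carry exactly one output per message (the assert above), the yielded result is a single value rather than a list. A sketch with a hypothetical input iterator and an illustrative calling convention:

```python
from clarifai_grpc.grpc.api.status import status_code_pb2

def prompts():
    # Hypothetical input iterator for a bidirectional streaming method.
    yield "first chunk"
    yield "second chunk"

for result, response in model.stream(prompts(), with_proto=True):
    if response.status.code != status_code_pb2.SUCCESS:
        raise RuntimeError(response.status.description)
    print(result)
```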
```diff
@@ -843,6 +876,7 @@ class ModelClient:
         self,
         inputs,
         method_name: str = 'stream',
+        with_proto: bool = False,
     ) -> Any:
         # method_name is set to 'stream' by default, this is because to replicate the input and output signature behaviour of sync to async stream.
         input_signature = self._method_signatures[method_name].input_fields
@@ -885,7 +919,12 @@ class ModelClient:

         async for response in response_stream:
             assert len(response.outputs) == 1, 'streaming methods must have exactly one output'
-
+            result = deserialize(response.outputs[0].data, output_signature, is_output=True)
+
+            if with_proto:
+                yield result, response
+            else:
+                yield result

     async def _async_stream_by_proto(
         self,
```
```diff
@@ -23,6 +23,9 @@ from clarifai.runners.utils.serializers import (
     TupleSerializer,
 )

+# Reserved parameter name for protobuf response access
+RESERVED_PARAM_WITH_PROTO = 'with_proto'
+

 def build_function_signature(func):
     '''
@@ -45,6 +48,12 @@ def build_function_signature(func):
     input_sigs = []
     input_streaming = []
     for p in sig.parameters.values():
+        # Validate that user methods don't use reserved parameter names
+        if p.name == RESERVED_PARAM_WITH_PROTO:
+            raise ValueError(
+                f"Parameter name '{RESERVED_PARAM_WITH_PROTO}' is reserved and cannot be used in model methods. "
+                f"This parameter is automatically added by the framework to provide access to protobuf responses."
+            )
         model_type_field, _, streaming = build_variable_signature(p.name, p.annotation, p.default)
         input_sigs.append(model_type_field)
         input_streaming.append(streaming)
```
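The practical consequence: a model method may no longer declare its own `with_proto` parameter. A minimal sketch, assuming the standard `ModelClass`/`@ModelClass.method` pattern from the model templates:

```python
from clarifai.runners.models.model_class import ModelClass

class MyModel(ModelClass):
    @ModelClass.method
    def predict(self, prompt: str, with_proto: bool = False) -> str:
        return prompt

# When the framework builds this method's signature (e.g. on load or upload),
# it now raises:
#   ValueError: Parameter name 'with_proto' is reserved and cannot be used
#   in model methods. ...
```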
```diff
@@ -229,7 +229,7 @@ def validate_context_auth(pat: str, user_id: str, api_base: str = None):


 def customize_ollama_model(
-    model_path, model_name=None, port=None, context_length=None, verbose=False
+    model_path, user_id, model_name=None, port=None, context_length=None, verbose=False
 ):
     """Customize the Ollama model name in the cloned template files.
     Args:
@@ -240,6 +240,24 @@ def customize_ollama_model(
      verbose: Whether to enable verbose logging - optional (defaults to False)

     """
+    config_path = os.path.join(model_path, 'config.yaml')
+    if os.path.exists(config_path):
+        with open(config_path, 'r') as f:
+            config = yaml.safe_load(f)
+
+        # Update the user_id in the model section
+        config['model']['user_id'] = user_id
+        if 'toolkit' not in config or config['toolkit'] is None:
+            config['toolkit'] = {}
+        if model_name is not None:
+            config['toolkit']['model'] = model_name
+        if port is not None:
+            config['toolkit']['port'] = port
+        if context_length is not None:
+            config['toolkit']['context_length'] = context_length
+        with open(config_path, 'w') as f:
+            yaml.dump(config, f, default_flow_style=False, sort_keys=False)
+
     model_py_path = os.path.join(model_path, "1", "model.py")

     if not os.path.exists(model_py_path):
```
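A sketch of the updated call with illustrative values; the new required `user_id` argument is stamped into the `model` section of config.yaml alongside the existing toolkit settings:

```python
from clarifai.utils.cli import customize_ollama_model

customize_ollama_model(
    model_path="./my-ollama-model",  # illustrative clone path
    user_id="my-clarifai-user-id",
    model_name="llama3.1",
    port=23333,
    context_length=8192,
)
```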
```diff
@@ -405,16 +423,20 @@ def convert_timestamp_to_string(timestamp: Timestamp) -> str:
     return datetime_obj.strftime('%Y-%m-%dT%H:%M:%SZ')


-def customize_huggingface_model(model_path, model_name):
+def customize_huggingface_model(model_path, user_id, model_name):
     config_path = os.path.join(model_path, 'config.yaml')
     if os.path.exists(config_path):
         with open(config_path, 'r') as f:
             config = yaml.safe_load(f)

-        # Update the
-
-
-
+        # Update the user_id in the model section
+        config['model']['user_id'] = user_id
+
+        if model_name:
+            # Update the repo_id in checkpoints section
+            if 'checkpoints' not in config:
+                config['checkpoints'] = {}
+            config['checkpoints']['repo_id'] = model_name

         with open(config_path, 'w') as f:
             yaml.dump(config, f, default_flow_style=False, sort_keys=False)
@@ -424,7 +446,7 @@ def customize_huggingface_model(model_path, model_name):
         logger.warning(f"config.yaml not found at {config_path}, skipping model configuration")


-def customize_lmstudio_model(model_path, model_name, port, context_length):
+def customize_lmstudio_model(model_path, user_id, model_name, port, context_length):
     """Customize the LM Studio model name in the cloned template files.
     Args:
      model_path: Path to the cloned model directory
@@ -438,6 +460,8 @@ def customize_lmstudio_model(model_path, model_name, port, context_length):
     if os.path.exists(config_path):
         with open(config_path, 'r') as f:
             config = yaml.safe_load(f)
+        # Update the user_id in the model section
+        config['model']['user_id'] = user_id
         if 'toolkit' not in config or config['toolkit'] is None:
             config['toolkit'] = {}
         if model_name is not None:
```
```diff
@@ -66,6 +66,7 @@ DEFAULT_OLLAMA_MODEL_REPO_BRANCH = "ollama"
 DEFAULT_HF_MODEL_REPO_BRANCH = "huggingface"
 DEFAULT_LMSTUDIO_MODEL_REPO_BRANCH = "lmstudio"
 DEFAULT_VLLM_MODEL_REPO_BRANCH = "vllm"
+DEFAULT_PYTHON_MODEL_REPO_BRANCH = "python"

 STATUS_OK = "200 OK"
 STATUS_MIXED = "207 MIXED"
```