clarifai 11.4.8__py3-none-any.whl → 11.4.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
clarifai/__init__.py CHANGED
@@ -1 +1 @@
- __version__ = "11.4.8"
+ __version__ = "11.4.9"
clarifai/cli/model.py CHANGED
@@ -25,6 +25,86 @@ def model():
  """Manage models: upload, test, local dev, predict, etc"""


+ @model.command()
+ @click.argument(
+     "model_path",
+     type=click.Path(),
+     required=False,
+     default=".",
+ )
+ @click.option(
+     '--model-type-id',
+     type=click.Choice(['mcp', 'openai'], case_sensitive=False),
+     required=False,
+     help='Model type: "mcp" for MCPModelClass, "openai" for OpenAIModelClass, or leave empty for default ModelClass.',
+ )
+ def init(model_path, model_type_id):
+     """Initialize a new model directory structure.
+
+     Creates the following structure in the specified directory:
+     ├── 1/
+     │   └── model.py
+     ├── requirements.txt
+     └── config.yaml
+
+     MODEL_PATH: Path where to create the model directory structure. If not specified, the current directory is used by default.
+     """
+     from clarifai.cli.model_templates import (
+         get_config_template,
+         get_model_template,
+         get_requirements_template,
+     )
+
+     # Resolve the absolute path
+     model_path = os.path.abspath(model_path)
+
+     # Create the model directory if it doesn't exist
+     os.makedirs(model_path, exist_ok=True)
+
+     # Create the 1/ subdirectory
+     model_version_dir = os.path.join(model_path, "1")
+     os.makedirs(model_version_dir, exist_ok=True)
+
+     # Create model.py
+     model_py_path = os.path.join(model_version_dir, "model.py")
+     if os.path.exists(model_py_path):
+         logger.warning(f"File {model_py_path} already exists, skipping...")
+     else:
+         model_template = get_model_template(model_type_id)
+         with open(model_py_path, 'w') as f:
+             f.write(model_template)
+         logger.info(f"Created {model_py_path}")
+
+     # Create requirements.txt
+     requirements_path = os.path.join(model_path, "requirements.txt")
+     if os.path.exists(requirements_path):
+         logger.warning(f"File {requirements_path} already exists, skipping...")
+     else:
+         requirements_template = get_requirements_template(model_type_id)
+         with open(requirements_path, 'w') as f:
+             f.write(requirements_template)
+         logger.info(f"Created {requirements_path}")
+
+     # Create config.yaml
+     config_path = os.path.join(model_path, "config.yaml")
+     if os.path.exists(config_path):
+         logger.warning(f"File {config_path} already exists, skipping...")
+     else:
+         config_model_type_id = "text-to-text"  # default
+
+         config_template = get_config_template(config_model_type_id)
+         with open(config_path, 'w') as f:
+             f.write(config_template)
+         logger.info(f"Created {config_path}")
+
+     logger.info(f"Model initialization complete in {model_path}")
+     logger.info("Next steps:")
+     logger.info("1. Search for '# TODO: please fill in' comments in the generated files")
+     logger.info("2. Update the model configuration in config.yaml")
+     logger.info("3. Add your model dependencies to requirements.txt")
+     logger.info("4. Implement your model logic in 1/model.py")
+
+
  @model.command()
  @click.argument("model_path", type=click.Path(exists=True), required=False, default=".")
  @click.option(
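
A minimal sketch of exercising the new `init` command in-process with Click's test runner (click is already a dependency of this CLI). The target directory "my_model" is a hypothetical example path, and since the command reports progress through the package logger rather than click.echo, how much of it appears in result.output depends on the logger's configuration:

from click.testing import CliRunner
from clarifai.cli.model import init

runner = CliRunner()
# "--model-type-id openai" selects the OpenAI-compatible template; omit it for the default ModelClass template.
result = runner.invoke(init, ["my_model", "--model-type-id", "openai"])
print(result.exit_code)  # 0 on success; 1/model.py, requirements.txt, and config.yaml land in my_model/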
clarifai/cli/model_templates.py ADDED
@@ -0,0 +1,243 @@
+ """Templates for model initialization."""
+
+ from clarifai import __version__
+
+
+ def get_model_class_template() -> str:
+     """Return the template for a basic ModelClass-based model."""
+     return '''from typing import Iterator, List
+ from clarifai.runners.models.model_class import ModelClass
+ from clarifai.runners.util.data_utils import Param
+
+ class MyModel(ModelClass):
+     """A custom model implementation using ModelClass."""
+
+     def load_model(self):
+         """Load the model here.
+         # TODO: please fill in
+         # Add your model loading logic here
+         """
+         pass
+
+     @ModelClass.method
+     def predict(
+         self,
+         prompt: str = "",
+         chat_history: List[dict] = None,
+         max_tokens: int = Param(default=256, description="The maximum number of tokens to generate. Shorter token lengths will provide faster performance."),
+         temperature: float = Param(default=1.0, description="A decimal number that determines the degree of randomness in the response"),
+         top_p: float = Param(default=1.0, description="An alternative to sampling with temperature, where the model considers the results of the tokens with top_p probability mass."),
+     ) -> str:
+         """This is the method that will be called when the runner is run. It takes in an input and returns an output."""
+         # TODO: please fill in
+         # Implement your prediction logic here
+         pass  # Replace with your actual logic
+
+     @ModelClass.method
+     def generate(
+         self,
+         prompt: str = "",
+         chat_history: List[dict] = None,
+         max_tokens: int = Param(default=256, description="The maximum number of tokens to generate. Shorter token lengths will provide faster performance."),
+         temperature: float = Param(default=1.0, description="A decimal number that determines the degree of randomness in the response"),
+         top_p: float = Param(default=1.0, description="An alternative to sampling with temperature, where the model considers the results of the tokens with top_p probability mass."),
+     ) -> Iterator[str]:
+         """Example yielding a streamed response."""
+         # TODO: please fill in
+         # Implement your generation logic here
+         pass  # Replace with your actual logic
+ '''
+
+
+ def get_mcp_model_class_template() -> str:
+     """Return the template for an MCPModelClass-based model."""
+     return '''from typing import Any
+
+ from fastmcp import FastMCP  # use fastmcp v2 not the built in mcp
+ from pydantic import Field
+
+ from clarifai.runners.models.mcp_class import MCPModelClass
+
+ # TODO: please fill in
+ # Configure your FastMCP server
+ server = FastMCP("my-mcp-server", instructions="", stateless_http=True)
+
+
+ # TODO: please fill in
+ # Add your tools, resources, and prompts here
+ @server.tool("example_tool", description="An example tool")
+ def example_tool(input_param: Any = Field(description="Example input parameter")):
+     """Example tool implementation."""
+     # TODO: please fill in
+     # Implement your tool logic here
+     return f"Processed: {input_param}"
+
+
+ # Static resource example
+ @server.resource("config://version")
+ def get_version():
+     """Example static resource."""
+     # TODO: please fill in
+     # Return your resource data
+     return "1.0.0"
+
+
+ @server.prompt()
+ def example_prompt(text: str) -> str:
+     """Example prompt template."""
+     # TODO: please fill in
+     # Define your prompt template
+     return f"Process this text: {text}"
+
+
+ class MyModel(MCPModelClass):
+     """A custom model implementation using MCPModelClass."""
+
+     def get_server(self) -> FastMCP:
+         """Return the FastMCP server instance."""
+         return server
+ '''
+
+
+ def get_openai_model_class_template() -> str:
+     """Return the template for an OpenAIModelClass-based model."""
+     return '''from typing import List
+ from openai import OpenAI
+ from clarifai.runners.models.openai_class import OpenAIModelClass
+ from clarifai.runners.util.data_utils import Param
+ from clarifai.runners.utils.openai_convertor import build_openai_messages
+
+ class MyModel(OpenAIModelClass):
+     """A custom model implementation using OpenAIModelClass."""
+
+     # TODO: please fill in
+     # Configure your OpenAI-compatible client for local model
+     client = OpenAI(
+         api_key="local-key",  # TODO: please fill in - use your local API key
+         base_url="http://localhost:8000/v1",  # TODO: please fill in - your local model server endpoint
+     )
+
+     # TODO: please fill in
+     # Specify the model name to use
+     model = "my-local-model"  # TODO: please fill in - replace with your local model name
+
+     def load_model(self):
+         """Optional: Add any additional model loading logic here."""
+         # TODO: please fill in (optional)
+         # Add any initialization logic if needed
+         pass
+
+     @OpenAIModelClass.method
+     def predict(
+         self,
+         prompt: str = "",
+         chat_history: List[dict] = None,
+         max_tokens: int = Param(default=256, description="The maximum number of tokens to generate. Shorter token lengths will provide faster performance."),
+         temperature: float = Param(default=1.0, description="A decimal number that determines the degree of randomness in the response"),
+         top_p: float = Param(default=1.0, description="An alternative to sampling with temperature, where the model considers the results of the tokens with top_p probability mass."),
+     ) -> str:
+         """Run a single prompt completion using the OpenAI client."""
+         # TODO: please fill in
+         # Implement your prediction logic here
+         messages = build_openai_messages(prompt, chat_history)
+         response = self.client.chat.completions.create(
+             model=self.model,
+             messages=messages,
+             max_completion_tokens=max_tokens,
+             temperature=temperature,
+             top_p=top_p,
+         )
+         return response.choices[0].message.content
+
+     @OpenAIModelClass.method
+     def generate(
+         self,
+         prompt: str = "",
+         chat_history: List[dict] = None,
+         max_tokens: int = Param(default=256, description="The maximum number of tokens to generate. Shorter token lengths will provide faster performance."),
+         temperature: float = Param(default=1.0, description="A decimal number that determines the degree of randomness in the response"),
+         top_p: float = Param(default=1.0, description="An alternative to sampling with temperature, where the model considers the results of the tokens with top_p probability mass."),
+     ):
+         """Stream a completion response using the OpenAI client."""
+         # TODO: please fill in
+         # Implement your streaming logic here
+         messages = build_openai_messages(prompt, chat_history)
+         stream = self.client.chat.completions.create(
+             model=self.model,
+             messages=messages,
+             max_completion_tokens=max_tokens,
+             temperature=temperature,
+             top_p=top_p,
+             stream=True,
+         )
+         for chunk in stream:
+             if chunk.choices:
+                 text = (chunk.choices[0].delta.content
+                         if chunk.choices[0].delta.content is not None else '')
+                 yield text
+ '''
+
+
+ def get_config_template(model_type_id: str = "text-to-text") -> str:
+     """Return the template for config.yaml."""
+     return f'''# Configuration file for your Clarifai model
+
+ model:
+   id: "my-model"  # TODO: please fill in - replace with your model ID
+   user_id: "user_id"  # TODO: please fill in - replace with your user ID
+   app_id: "app_id"  # TODO: please fill in - replace with your app ID
+   model_type_id: "{model_type_id}"  # TODO: please fill in - replace if different model type ID
+
+ build_info:
+   python_version: "3.12"
+
+ # TODO: please fill in - adjust compute requirements for your model
+ inference_compute_info:
+   cpu_limit: "1"  # TODO: please fill in - Amount of CPUs to use as a limit
+   cpu_memory: "1Gi"  # TODO: please fill in - Amount of CPU memory to use as a limit
+   cpu_requests: "0.5"  # TODO: please fill in - Amount of CPUs to use as a minimum
+   cpu_memory_requests: "512Mi"  # TODO: please fill in - Amount of CPU memory to use as a minimum
+   num_accelerators: 1  # TODO: please fill in - Amount of GPU/TPUs to use
+   accelerator_type: ["NVIDIA-*"]  # TODO: please fill in - type of accelerators requested
+   accelerator_memory: "1Gi"  # TODO: please fill in - Amount of accelerator/GPU memory to use as a minimum
+
+ # TODO: please fill in (optional) - add checkpoints section if needed
+ # checkpoints:
+ #   type: "huggingface"  # supported type
+ #   repo_id: "your-model-repo"  # for huggingface
+ #   when: "build"  # or "runtime", "upload"
+ '''
+
+
+ def get_requirements_template(model_type_id: str = None) -> str:
+     """Return the template for requirements.txt."""
+     requirements = f'''# Clarifai SDK - required
+ clarifai>={__version__}
+ '''
+     if model_type_id == "mcp":
+         requirements += "fastmcp\n"
+     elif model_type_id == "openai":
+         requirements += "openai\n"
+     requirements += '''
+ # TODO: please fill in - add your model's dependencies here
+ # Examples:
+ # torch>=2.0.0
+ # transformers>=4.30.0
+ # numpy>=1.21.0
+ # pillow>=9.0.0
+ '''
+     return requirements
+
+
+ # Mapping of model type IDs to their corresponding templates
+ MODEL_TYPE_TEMPLATES = {
+     "mcp": get_mcp_model_class_template,
+     "openai": get_openai_model_class_template,
+ }
+
+
+ def get_model_template(model_type_id: str = None) -> str:
+     """Get the appropriate model template based on model_type_id."""
+     if model_type_id in MODEL_TYPE_TEMPLATES:
+         return MODEL_TYPE_TEMPLATES[model_type_id]()
+     return get_model_class_template()
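
A quick sketch of how these helpers compose, based only on the code above: get_model_template dispatches on the model type ID and falls back to the plain ModelClass template, while get_requirements_template pins the SDK to the running version and appends the type-specific dependency:

from clarifai.cli.model_templates import get_model_template, get_requirements_template

# "mcp" and "openai" select their dedicated templates; anything else falls back.
assert "MCPModelClass" in get_model_template("mcp")
assert "class MyModel(ModelClass)" in get_model_template(None)

# The requirements template embeds the installed SDK version (11.4.9 in this release).
reqs = get_requirements_template("openai")
assert "clarifai>=11.4.9" in reqs and "openai\n" in reqs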
{clarifai-11.4.8.dist-info → clarifai-11.4.9.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: clarifai
- Version: 11.4.8
+ Version: 11.4.9
  Home-page: https://github.com/Clarifai/clarifai-python
  Author: Clarifai
  Author-email: support@clarifai.com
@@ -608,3 +608,8 @@ rag_agent = RAG(workflow_url="WORKFLOW_URL")

  See many more code examples in this [repo](https://github.com/Clarifai/examples).
  Also see the official [Python SDK docs](https://clarifai-python.readthedocs.io/en/latest/index.html)
+
+ ## :open_file_folder: Model Upload
+
+ Examples for uploading models and runners have been moved to this [repo](https://github.com/Clarifai/runners-examples).
+ Find our official documentation at [docs.clarifai.com/compute/models/upload](https://docs.clarifai.com/compute/models/upload).
{clarifai-11.4.8.dist-info → clarifai-11.4.9.dist-info}/RECORD RENAMED
@@ -1,4 +1,4 @@
- clarifai/__init__.py,sha256=fdwUPqydH_J5QG9a6SZJDmJJHKUfCG6gEA5tSBI_n0k,23
+ clarifai/__init__.py,sha256=SIhopzpvdgMSl3sPaYcNcFup4S7F1oJ6rqaOtCSjmJ4,23
  clarifai/cli.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  clarifai/errors.py,sha256=GXa6D4v_L404J83jnRNFPH7s-1V9lk7w6Ws99f1g-AY,2772
  clarifai/versions.py,sha256=ecSuEB_nOL2XSoYHDw2n23XUbm_KPOGjudMXmQrGdS8,224
@@ -8,7 +8,8 @@ clarifai/cli/__main__.py,sha256=7nPbLW7Jr2shkgMPvnxpn4xYGMvIcnqluJ69t9w4H_k,74
  clarifai/cli/base.py,sha256=mzfAHRhon6tKntpxk241GD-Sjrb2-V99nAOasElLuuw,8254
  clarifai/cli/compute_cluster.py,sha256=8Xss0Obrp6l1XuxJe0luOqU_pf8vXGDRi6jyIe8qR6k,2282
  clarifai/cli/deployment.py,sha256=9C4I6_kyMxRkWl6h681wc79-3mAtDHtTUaxRv05OZMs,4262
- clarifai/cli/model.py,sha256=R9Bl32ELs4w658dDLeugjwf5d_3SMJiv64ciEqoWaTQ,27291
+ clarifai/cli/model.py,sha256=9sJ5p4TJc41vOeeFoMC3e8cciscSU7TmvAgW0wfO54U,30191
+ clarifai/cli/model_templates.py,sha256=_ZonIBnY9KKSJY31KZbUys_uN_k_Txu7Dip12KWfmSU,9633
  clarifai/cli/nodepool.py,sha256=H6OIdUW_EiyDUwZogzEDoYmVwEjLMsgoDlPyE7gjIuU,4245
  clarifai/client/__init__.py,sha256=NhpNFRJY6mTi8ca-5hUeTEmYeDKHDNXY48FN63pDuos,703
  clarifai/client/app.py,sha256=D0FG9v07g1dExLnQsYt0OQjsJCkVvuw76BOpcqaCzfM,41380
@@ -106,9 +107,9 @@ clarifai/workflows/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuF
  clarifai/workflows/export.py,sha256=Oq3RVNKvv1iH46U6oIjXa-MXWJ4sTlXr_NSfwoxr3H4,2149
  clarifai/workflows/utils.py,sha256=ESL3INcouNcLKCh-nMpfXX-YbtCzX7tz7hT57_RGQ3M,2079
  clarifai/workflows/validate.py,sha256=UhmukyHkfxiMFrPPeBdUTiCOHQT5-shqivlBYEyKTlU,2931
- clarifai-11.4.8.dist-info/licenses/LICENSE,sha256=mUqF_d12-qE2n41g7C5_sq-BMLOcj6CNN-jevr15YHU,555
- clarifai-11.4.8.dist-info/METADATA,sha256=MpDyC9bKsOSoUeOw-xPJDPHPFvaBCR0eNPb6vJy3z74,22398
- clarifai-11.4.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- clarifai-11.4.8.dist-info/entry_points.txt,sha256=X9FZ4Z-i_r2Ud1RpZ9sNIFYuu_-9fogzCMCRUD9hyX0,51
- clarifai-11.4.8.dist-info/top_level.txt,sha256=wUMdCQGjkxaynZ6nZ9FAnvBUCgp5RJUVFSy2j-KYo0s,9
- clarifai-11.4.8.dist-info/RECORD,,
+ clarifai-11.4.9.dist-info/licenses/LICENSE,sha256=mUqF_d12-qE2n41g7C5_sq-BMLOcj6CNN-jevr15YHU,555
+ clarifai-11.4.9.dist-info/METADATA,sha256=ik-jR4kso026x30ICc47SoptmYdxq1Ut5lsUifmp55o,22682
+ clarifai-11.4.9.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ clarifai-11.4.9.dist-info/entry_points.txt,sha256=X9FZ4Z-i_r2Ud1RpZ9sNIFYuu_-9fogzCMCRUD9hyX0,51
+ clarifai-11.4.9.dist-info/top_level.txt,sha256=wUMdCQGjkxaynZ6nZ9FAnvBUCgp5RJUVFSy2j-KYo0s,9
+ clarifai-11.4.9.dist-info/RECORD,,
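
After upgrading, a quick sanity check (illustrative, run in any Python shell) confirms the version bump and the new templates module shipped in this wheel:

import clarifai
from clarifai.cli import model_templates

assert clarifai.__version__ == "11.4.9"
assert callable(model_templates.get_model_template)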