clarifai 11.4.9-py3-none-any.whl → 11.5.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clarifai/__init__.py +1 -1
- clarifai/runners/models/dummy_openai_model.py +59 -26
- clarifai/runners/models/model_builder.py +1 -0
- clarifai/runners/models/openai_class.py +89 -143
- clarifai/runners/models/visual_classifier_class.py +1 -1
- clarifai/runners/utils/code_script.py +3 -1
- clarifai/runners/utils/data_utils.py +7 -1
- {clarifai-11.4.9.dist-info → clarifai-11.5.0.dist-info}/METADATA +3 -3
- {clarifai-11.4.9.dist-info → clarifai-11.5.0.dist-info}/RECORD +13 -13
- {clarifai-11.4.9.dist-info → clarifai-11.5.0.dist-info}/WHEEL +0 -0
- {clarifai-11.4.9.dist-info → clarifai-11.5.0.dist-info}/entry_points.txt +0 -0
- {clarifai-11.4.9.dist-info → clarifai-11.5.0.dist-info}/licenses/LICENSE +0 -0
- {clarifai-11.4.9.dist-info → clarifai-11.5.0.dist-info}/top_level.txt +0 -0
clarifai/__init__.py
CHANGED
@@ -1 +1 @@
-__version__ = "11.4.9"
+__version__ = "11.5.0"

clarifai/runners/models/dummy_openai_model.py
CHANGED
@@ -13,9 +13,9 @@ class MockOpenAIClient:
         def create(self, **kwargs):
             """Mock create method for compatibility."""
             if kwargs.get("stream", False):
-                return MockCompletionStream(kwargs
+                return MockCompletionStream(**kwargs)
             else:
-                return MockCompletion(kwargs
+                return MockCompletion(**kwargs)
 
     def __init__(self):
         self.chat = self  # Make self.chat point to self for compatibility
@@ -25,6 +25,19 @@ class MockOpenAIClient:
 class MockCompletion:
     """Mock completion object that mimics the OpenAI completion response structure."""
 
+    class Usage:
+        def __init__(self, prompt_tokens, completion_tokens, total_tokens):
+            self.total_tokens = total_tokens
+            self.prompt_tokens = prompt_tokens
+            self.completion_tokens = completion_tokens
+
+        def to_dict(self):
+            return dict(
+                total_tokens=self.total_tokens,
+                prompt_tokens=self.prompt_tokens,
+                completion_tokens=self.completion_tokens,
+            )
+
     class Choice:
         class Message:
             def __init__(self, content):
@@ -36,17 +49,21 @@ class MockCompletion:
             self.finish_reason = "stop"
             self.index = 0
 
-    def __init__(self,
+    def __init__(self, **kwargs):
         # Generate a simple response based on the last message
+        messages = kwargs.get("messages")
         last_message = messages[-1] if messages else {"content": ""}
        response_text = f"Echo: {last_message.get('content', '')}"
 
         self.choices = [self.Choice(response_text)]
-        self.usage =
-
-
-
-
+        self.usage = self.Usage(
+            **{
+                "prompt_tokens": len(str(messages)),
+                "completion_tokens": len(response_text),
+                "total_tokens": len(str(messages)) + len(response_text),
+            }
+        )
+
         self.id = "dummy-completion-id"
         self.created = 1234567890
         self.model = "dummy-model"
@@ -65,9 +82,12 @@ class MockCompletion:
                 }
                 for choice in self.choices
             ],
-            "usage": self.usage,
+            "usage": self.usage.to_dict(),
         }
 
+    def model_dump(self):
+        return self.to_dict()
+
 
 class MockCompletionStream:
     """Mock completion stream that mimics the OpenAI streaming response structure."""
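
With `usage` now a nested `Usage` object and `model_dump()` delegating to `to_dict()`, the mock mirrors the pydantic-style surface of the real openai-python response objects. A minimal sketch of the resulting behavior, assuming the reconstructed class bodies above:

    completion = MockCompletion(messages=[{"role": "user", "content": "hi"}])
    print(completion.usage.prompt_tokens)    # token counts are attributes on Usage
    print(completion.model_dump()["usage"])  # ...and serialize back to a plain dict
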
@@ -79,14 +99,27 @@ class MockCompletionStream:
                     self.content = content
                     self.role = "assistant" if content is None else None
 
+            class Usage:
+                def __init__(self, prompt_tokens, completion_tokens, total_tokens):
+                    self.total_tokens = total_tokens
+                    self.prompt_tokens = prompt_tokens
+                    self.completion_tokens = completion_tokens
+
+                def to_dict(self):
+                    return dict(
+                        total_tokens=self.total_tokens,
+                        prompt_tokens=self.prompt_tokens,
+                        completion_tokens=self.completion_tokens,
+                    )
+
             def __init__(self, content=None, include_usage=False):
                 self.delta = self.Delta(content)
                 self.finish_reason = None if content else "stop"
                 self.index = 0
                 self.usage = (
-                    {"prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15}
+                    self.Usage(**{"prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15})
                     if include_usage
-                    else None
+                    else self.Usage(None, None, None)
                 )
 
         def __init__(self, content=None, include_usage=False):
@@ -114,11 +147,16 @@ class MockCompletionStream:
                 ],
             }
             if self.usage:
-                result["usage"] = self.usage
+                result["usage"] = self.usage.to_dict()
             return result
 
-
+        def model_dump(self):
+            return self.to_dict()
+
+    def __init__(self, **kwargs):
         # Generate a simple response based on the last message
+        messages = kwargs.get("messages")
+
         last_message = messages[-1] if messages else {"content": ""}
         self.response_text = f"Echo: {last_message.get('content', '')}"
         # Create chunks that ensure the full text is included in the first chunk
@@ -150,18 +188,14 @@ class DummyOpenAIModel(OpenAIModelClass):
     def _process_request(self, **kwargs) -> Dict[str, Any]:
         """Process a request for non-streaming responses."""
         completion_args = self._create_completion_args(kwargs)
-        return self.client.chat.completions.create(**completion_args).
+        return self.client.chat.completions.create(**completion_args).model_dump()
 
     def _process_streaming_request(self, **kwargs) -> Iterator[Dict[str, Any]]:
         """Process a request for streaming responses."""
-
-        completion_stream = self.client.chat.completions.create(**completion_args)
-        completion_stream.include_usage = kwargs.get('stream_options', {}).get(
-            'include_usage', False
-        )
+        completion_stream = self.client.chat.completions.create(**kwargs)
 
         for chunk in completion_stream:
-            yield chunk.
+            yield chunk.model_dump()
 
     # Override the method directly for testing
     @OpenAIModelClass.method
@@ -169,14 +203,13 @@ class DummyOpenAIModel(OpenAIModelClass):
         """Direct implementation for testing purposes."""
         try:
             request_data = json.loads(req)
-
-
+            request_data = self._create_completion_args(request_data)
             # Validate messages
-            if not
+            if not request_data.get("messages"):
                 yield "Error: No messages provided"
                 return
 
-            for message in
+            for message in request_data["messages"]:
                 if (
                     not isinstance(message, dict)
                     or "role" not in message
@@ -185,7 +218,7 @@ class DummyOpenAIModel(OpenAIModelClass):
                     yield "Error: Invalid message format"
                     return
 
-            for chunk in self._process_streaming_request(**
+            for chunk in self._process_streaming_request(**request_data):
                 yield json.dumps(chunk)
         except Exception as e:
             yield f"Error: {str(e)}"
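
Taken together, these hunks mean both dummy helpers now return `model_dump()` output and the streaming helper forwards the raw request. A hedged sketch of a round trip (key names follow the reconstructed hunks above; it assumes `DummyOpenAIModel` wires up the mock client and a model name, which this diff does not show):

    model = DummyOpenAIModel()

    # Non-streaming: one JSON-serializable dict per request.
    resp = model._process_request(messages=[{"role": "user", "content": "ping"}])
    print(resp["usage"])  # now a plain dict, produced by Usage.to_dict()

    # Streaming: kwargs go straight to the client, so stream=True must be passed.
    for chunk in model._process_streaming_request(
        stream=True,
        messages=[{"role": "user", "content": "ping"}],
        stream_options={"include_usage": True},
    ):
        print(chunk)  # each item is chunk.model_dump(), ready for json.dumps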

clarifai/runners/models/model_builder.py
CHANGED
@@ -839,6 +839,7 @@ class ModelBuilder:
     def get_model_version_proto(self):
         signatures = self.get_method_signatures()
         model_version_proto = resources_pb2.ModelVersion(
+            pretrained_model_config=resources_pb2.PretrainedModelConfig(),
             inference_compute_info=self.inference_compute_info,
             method_signatures=signatures,
         )
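
Attaching an explicitly empty `PretrainedModelConfig()` matters because of proto3 presence semantics: a message-typed field only counts as set (and only gets serialized) once a submessage is assigned. A sketch of the distinction, assuming the clarifai-grpc generated classes:

    from clarifai_grpc.grpc.api import resources_pb2

    bare = resources_pb2.ModelVersion()
    configured = resources_pb2.ModelVersion(
        pretrained_model_config=resources_pb2.PretrainedModelConfig(),
    )

    print(bare.HasField("pretrained_model_config"))        # False
    print(configured.HasField("pretrained_model_config"))  # True: present on the wire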

clarifai/runners/models/openai_class.py
CHANGED
@@ -20,6 +20,15 @@ class OpenAIModelClass(ModelClass):
         model = "gpt-4"
     """
 
+    # API Endpoints
+    ENDPOINT_CHAT_COMPLETIONS = "/chat/completions"
+    ENDPOINT_IMAGES_GENERATE = "/images/generations"
+    ENDPOINT_EMBEDDINGS = "/embeddings"
+    ENDPOINT_RESPONSES = "/responses"
+
+    # Default endpoint
+    DEFAULT_ENDPOINT = ENDPOINT_CHAT_COMPLETIONS
+
     # These should be overridden in subclasses
     client = None
     model = None
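
These constants give the transport methods (below) a single switch point over several OpenAI API surfaces; callers pick one by adding an `openai_endpoint` field to the JSON payload. A hedged sketch, where `my_model` stands in for a hypothetical configured subclass instance:

    import json

    request = {
        "openai_endpoint": OpenAIModelClass.ENDPOINT_EMBEDDINGS,  # "/embeddings"
        "input": "text to embed",
    }
    raw = my_model.openai_transport(json.dumps(request))
    print(raw)  # JSON response body, or an "Error: ..." string on failure
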
@@ -35,141 +44,100 @@ class OpenAIModelClass(ModelClass):
                 "Subclasses must set the 'model' class attribute or ensure the client can list models"
             ) from e
 
-    def
-        """Extract and validate common openai arguments parameters from the request data.
-
-        Args:
-            request_data: The parsed JSON request data
-
-        Returns:
-            Dict containing the extracted parameters
-        """
-        return {
-            "messages": request_data.get("messages", []),
-            "temperature": request_data.get("temperature", 1.0),
-            "max_tokens": request_data.get("max_tokens"),
-            "max_completion_tokens": request_data.get("max_completion_tokens"),
-            "n": request_data.get("n", 1),
-            "frequency_penalty": request_data.get("frequency_penalty"),
-            "presence_penalty": request_data.get("presence_penalty"),
-            "top_p": request_data.get("top_p", 1.0),
-            "reasoning_effort": request_data.get("reasoning_effort"),
-            "response_format": request_data.get("response_format"),
-            "stop": request_data.get("stop"),
-            "tools": request_data.get("tools"),
-            "tool_choice": request_data.get("tool_choice"),
-            "tool_resources": request_data.get("tool_resources"),
-            "modalities": request_data.get("modalities"),
-            "stream_options": request_data.get("stream_options", {"include_usage": True}),
-        }
-
-    def _create_completion_args(
-        self, params: Dict[str, Any], stream: bool = False
-    ) -> Dict[str, Any]:
+    def _create_completion_args(self, params: Dict[str, Any]) -> Dict[str, Any]:
         """Create the completion arguments dictionary from parameters.
 
         Args:
             params: Dictionary of parameters extracted from request
-            stream: Whether this is a streaming request
 
         Returns:
             Dict containing the completion arguments
         """
-        completion_args = {
-
-
-            "temperature": params["temperature"],
-        }
-
+        completion_args = {**params}
+        completion_args.update({"model": self.model})
+        stream = completion_args.pop("stream", False)
         if stream:
-
-
-
-
-
-            optional_params = [
-                "max_tokens",
-                "max_completion_tokens",
-                "n",
-                "frequency_penalty",
-                "presence_penalty",
-                "top_p",
-                "reasoning_effort",
-                "response_format",
-                "stop",
-                "tools",
-                "tool_choice",
-                "tool_resources",
-                "modalities",
-            ]
-
-            for param in optional_params:
-                if params.get(param) is not None:
-                    completion_args[param] = params[param]
+            # Force to use usage
+            stream_options = params.pop("stream_options", {})
+            stream_options.update({"include_usage": True})
+            completion_args["stream_options"] = stream_options
+        completion_args["stream"] = stream
 
         return completion_args
 
-    def
-
+    def _set_usage(self, resp):
+        if resp.usage and resp.usage.prompt_tokens and resp.usage.completion_tokens:
+            self.set_output_context(
+                prompt_tokens=resp.usage.prompt_tokens,
+                completion_tokens=resp.usage.completion_tokens,
+            )
+
+    def _handle_chat_completions(self, request_data: Dict[str, Any]):
+        """Handle chat completion requests."""
+        completion_args = self._create_completion_args(request_data)
+        completion = self.client.chat.completions.create(**completion_args)
+        self._set_usage(completion)
+        return completion
+
+    def _handle_images_generate(self, request_data: Dict[str, Any]):
+        """Handle image generation requests."""
+        image_args = {**request_data}
+        image_args.update({"model": self.model})
+        response = self.client.images.generate(**image_args)
+        return response
+
+    def _handle_embeddings(self, request_data: Dict[str, Any]):
+        """Handle embedding requests."""
+        embedding_args = {**request_data}
+        embedding_args.update({"model": self.model})
+        response = self.client.embeddings.create(**embedding_args)
+        return response
+
+    def _handle_responses(self, request_data: Dict[str, Any]):
+        """Handle response requests."""
+        response_args = {**request_data}
+        response_args.update({"model": self.model})
+        response = self.client.responses.create(**response_args)
+        return response
+
+    def _route_request(self, endpoint: str, request_data: Dict[str, Any]):
+        """Route the request to appropriate handler based on endpoint."""
+        handlers = {
+            self.ENDPOINT_CHAT_COMPLETIONS: self._handle_chat_completions,
+            self.ENDPOINT_IMAGES_GENERATE: self._handle_images_generate,
+            self.ENDPOINT_EMBEDDINGS: self._handle_embeddings,
+            self.ENDPOINT_RESPONSES: self._handle_responses,
+        }
 
-
-
+        handler = handlers.get(endpoint)
+        if not handler:
+            raise ValueError(f"Unsupported endpoint: {endpoint}")
 
-
-            JSON string containing the error response
-        """
-        error_response = {
-            "error": {
-                "message": str(error),
-                "type": "InvalidRequestError",
-                "code": "invalid_request_error",
-            }
-        }
-        return json.dumps(error_response)
+        return handler(request_data)
 
     @ModelClass.method
     def openai_transport(self, msg: str) -> str:
-        """
-        then return its response.
+        """Process an OpenAI-compatible request and send it to the appropriate OpenAI endpoint.
 
         Args:
-            msg: JSON string containing the request parameters
+            msg: JSON string containing the request parameters including 'openai_endpoint'
 
         Returns:
             JSON string containing the response or error
         """
         try:
             request_data = json.loads(msg)
-
-
-
-            if stream:
-                chunks = self._process_streaming_request(**params)
-                response_list = []
-                for chunk in chunks:
-                    response_list.append(chunk)
-                return json.dumps(response_list)
-            else:
-                completion = self._process_request(**params)
-                if completion.get('usage'):
-                    if completion['usage'].get('prompt_tokens') and completion['usage'].get(
-                        'completion_tokens'
-                    ):
-                        self.set_output_context(
-                            prompt_tokens=completion['usage']['prompt_tokens'],
-                            completion_tokens=completion['usage']['completion_tokens'],
-                        )
-
-                return json.dumps(completion)
-
+            endpoint = request_data.pop("openai_endpoint", self.DEFAULT_ENDPOINT)
+            response = self._route_request(endpoint, request_data)
+            return json.dumps(response.model_dump())
         except Exception as e:
-            return
+            return f"Error: {e}"
 
     @ModelClass.method
     def openai_stream_transport(self, msg: str) -> Iterator[str]:
         """Process an OpenAI-compatible request and return a streaming response iterator.
         This method is used when stream=True and returns an iterator of strings directly,
-        without converting to a list or JSON serializing.
+        without converting to a list or JSON serializing. Supports chat completions and responses endpoints.
 
         Args:
             msg: The request as a JSON string.
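
The rewritten `_create_completion_args` no longer whitelists parameters: the request dict is forwarded as-is, the model name is injected, and streaming calls get `include_usage` forced on so `_set_usage` can always report token counts. A small sketch of the transformation (`model` here is a hypothetical configured instance whose `model` attribute is "my-model"):

    args = model._create_completion_args(
        {
            "messages": [{"role": "user", "content": "hi"}],
            "stream": True,
            "stream_options": {"include_usage": False},
        }
    )
    print(args["stream_options"])         # {'include_usage': True}: forced on
    print(args["model"], args["stream"])  # "my-model" True
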
@@ -179,43 +147,21 @@ class OpenAIModelClass(ModelClass):
         """
         try:
             request_data = json.loads(msg)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        """Process a standard (non-streaming) request using the OpenAI client.
-
-        Args:
-            **kwargs: Request parameters
-
-        Returns:
-            The completion response from the OpenAI client
-        """
-        completion_args = self._create_completion_args(kwargs)
-        return self.client.chat.completions.create(**completion_args).to_dict()
-
-    def _process_streaming_request(self, **kwargs) -> Iterator[str]:
-        """Process a streaming request using the OpenAI client.
-
-        Args:
-            **kwargs: Request parameters
-
-        Returns:
-            Iterator yielding response chunks
-        """
-        completion_args = self._create_completion_args(kwargs, stream=True)
-        completion_stream = self.client.chat.completions.create(**completion_args)
+            endpoint = request_data.pop("openai_endpoint", self.DEFAULT_ENDPOINT)
+            if endpoint not in [self.ENDPOINT_CHAT_COMPLETIONS, self.ENDPOINT_RESPONSES]:
+                raise ValueError("Streaming is only supported for chat completions and responses.")
+
+            if endpoint == self.ENDPOINT_RESPONSES:
+                # Handle responses endpoint
+                stream_response = self._route_request(endpoint, request_data)
+                for chunk in stream_response:
+                    yield json.dumps(chunk.model_dump())
+            else:
+                completion_args = self._create_completion_args(request_data)
+                stream_completion = self.client.chat.completions.create(**completion_args)
+                for chunk in stream_completion:
+                    self._set_usage(chunk)
+                    yield json.dumps(chunk.model_dump())
 
-
-            yield
+        except Exception as e:
+            yield f"Error: {e}"
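
Since `openai_stream_transport` yields one JSON string per chunk and reports failures in-band as "Error: ..." strings, a caller can decode chunk by chunk. A hedged consumption sketch (`my_model` again a hypothetical configured instance):

    import json

    payload = json.dumps({
        "openai_endpoint": "/chat/completions",
        "stream": True,
        "messages": [{"role": "user", "content": "stream this"}],
    })

    for line in my_model.openai_stream_transport(payload):
        if line.startswith("Error:"):
            raise RuntimeError(line)
        chunk = json.loads(line)
        print(chunk["choices"])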

clarifai/runners/models/visual_classifier_class.py
CHANGED
@@ -52,7 +52,7 @@ class VisualClassifierClass(ModelClass):
 
     @staticmethod
     def process_concepts(
-        logits: torch.Tensor,
+        logits: torch.Tensor, model_labels: Dict[int, str]
     ) -> List[List[Concept]]:
         """Convert model logits into a structured format of concepts.
 

clarifai/runners/utils/code_script.py
CHANGED
@@ -111,7 +111,7 @@ model = Model.from_current_context()"""
     else:
         model_ui_url = url_helper.clarifai_url(user_id, app_id, "models", model_id)
         model_section = f"""
-model = Model({model_ui_url},
+model = Model("{model_ui_url}",
     deployment_id = {deployment_id},  # Only needed for dedicated deployed models
     {base_url_str}
 )
@@ -133,6 +133,8 @@ model = Model({model_ui_url},
             continue
         if default_value is None and required:
             default_value = _set_default_value(param_type)
+        if param_type == "str" and default_value is not None:
+            default_value = json.dumps(default_value)
         client_script_str += f"{param_name}={default_value}, "
     client_script_str = client_script_str.rstrip(", ") + ")"
     if method_signature.method_type == resources_pb2.RunnerMethodType.UNARY_UNARY:
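
Both hunks are quoting fixes in the generated client script: the model URL is now wrapped in quotes, and string defaults go through `json.dumps` so they render as valid Python literals rather than bare words. A quick illustration of the second fix (`prompt` is a made-up parameter name):

    import json

    default_value = "What time is it?"
    print(f"prompt={default_value}")              # prompt=What time is it?   (not valid Python)
    print(f"prompt={json.dumps(default_value)}")  # prompt="What time is it?" (a proper literal)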

clarifai/runners/utils/data_utils.py
CHANGED
@@ -377,8 +377,14 @@ class Param(MessageData):
 
             if proto is None:
                 proto = ParamProto()
-
+
+            if isinstance(default, str):
+                proto.default = default
+            else:
+                proto.default = json.dumps(default)
+
             return proto
+
         except Exception:
             if default is not None:
                 proto.default = str(default)
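
The new branch keeps string defaults verbatim and JSON-encodes everything else before storing them in the proto's string-valued `default` field, which keeps non-string types round-trippable. A minimal sketch of the rule, with `encode_default` as a hypothetical stand-in for the branch above:

    import json

    def encode_default(default):
        # Strings pass through untouched; other types become JSON text.
        if isinstance(default, str):
            return default
        return json.dumps(default)

    print(encode_default("cat"))       # cat
    print(encode_default(0.7))         # 0.7  (the JSON text)
    print(encode_default(["a", "b"]))  # ["a", "b"]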

{clarifai-11.4.9.dist-info → clarifai-11.5.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: clarifai
-Version: 11.4.9
+Version: 11.5.0
 Home-page: https://github.com/Clarifai/clarifai-python
 Author: Clarifai
 Author-email: support@clarifai.com
@@ -19,8 +19,8 @@ Classifier: Operating System :: OS Independent
 Requires-Python: >=3.8
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: clarifai-grpc>=11.
-Requires-Dist: clarifai-protocol>=0.0.
+Requires-Dist: clarifai-grpc>=11.5.5
+Requires-Dist: clarifai-protocol>=0.0.24
 Requires-Dist: numpy>=1.22.0
 Requires-Dist: tqdm>=4.65.0
 Requires-Dist: PyYAML>=6.0.1

{clarifai-11.4.9.dist-info → clarifai-11.5.0.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-clarifai/__init__.py,sha256=
+clarifai/__init__.py,sha256=89g_4HXr4XouZfNDdwEbm-Xwu06Vvu-EUBmo-baEVQQ,23
 clarifai/cli.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/errors.py,sha256=GXa6D4v_L404J83jnRNFPH7s-1V9lk7w6Ws99f1g-AY,2772
 clarifai/versions.py,sha256=ecSuEB_nOL2XSoYHDw2n23XUbm_KPOGjudMXmQrGdS8,224
@@ -68,20 +68,20 @@ clarifai/runners/__init__.py,sha256=CQhpUOj_x-oV9xEUKdL-hi3A1BQAtPUv-FFOev4a96w,
 clarifai/runners/server.py,sha256=9qVAs8pRHmtyY0RCNIQ1uP8nqDADIFZ03LnkoDt1h4U,4692
 clarifai/runners/dockerfile_template/Dockerfile.template,sha256=5cjv7U8PmWa3DB_5B1CqSYh_6GE0E0np52TIAa7EIDE,2312
 clarifai/runners/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-clarifai/runners/models/dummy_openai_model.py,sha256=
+clarifai/runners/models/dummy_openai_model.py,sha256=pcmAVbqTTGG4J3BLVjKfvM_SQ-GET_XexIUdLcr9Zvo,8373
 clarifai/runners/models/mcp_class.py,sha256=7uwCMade0LMMBq7vczhPf4Kxdmh8Rj0R7Pg3pPxYdjQ,6386
-clarifai/runners/models/model_builder.py,sha256=
+clarifai/runners/models/model_builder.py,sha256=PiqPyTGPSKsYvOQNpBzs4e1_wuEbtE-P3yEkLE4Py10,49231
 clarifai/runners/models/model_class.py,sha256=-euUF-eHUi4KXR_e1pIwvToDZ13CM6TSz2FolzildjM,16069
 clarifai/runners/models/model_run_locally.py,sha256=6-6WjEKc0ba3gAv4wOLdMs2XOzS3b-2bZHJS0wdVqJY,20088
 clarifai/runners/models/model_runner.py,sha256=SccX-RxTgruSpQaM21uMSl-z1x6fOa13fQZMQW8NNRY,7297
 clarifai/runners/models/model_servicer.py,sha256=rRd_fNEXwqiBSzTUtPI2r07EBdcCPd8tcSPHeqTe0_I,3445
-clarifai/runners/models/openai_class.py,sha256=
-clarifai/runners/models/visual_classifier_class.py,sha256=
+clarifai/runners/models/openai_class.py,sha256=aXlk5W6LWkh-A4eZYi74DeLW0i_86_9DYYGxpJHXI0w,6688
+clarifai/runners/models/visual_classifier_class.py,sha256=1ZoLfCT2crrgRbejjTMAIwpTRgQMiH9N9yflOVpFxSg,2721
 clarifai/runners/models/visual_detector_class.py,sha256=ky4oFAkGCKPpGPdgaOso-n6D3HcmnbKee_8hBsNiV8U,2883
 clarifai/runners/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-clarifai/runners/utils/code_script.py,sha256=
+clarifai/runners/utils/code_script.py,sha256=Ae1u2686ycxDGdYXnPzVvDxhc5i-JZ_A4GDpq0xHvjo,12772
 clarifai/runners/utils/const.py,sha256=Q4Ps6gIEJCyTdQCfmT6PaS61WHmhT25XigV1NugWz-E,1544
-clarifai/runners/utils/data_utils.py,sha256=
+clarifai/runners/utils/data_utils.py,sha256=HRpMYR2O0OiDpXXhOManLHTeomC4bFnXMHVAiT_12yE,20856
 clarifai/runners/utils/loader.py,sha256=K5Y8MPbIe5STw2gDnrL8KqFgKNxEo7bz-RV0ip1T4PM,10900
 clarifai/runners/utils/method_signatures.py,sha256=qdHaO8ZIgP6BBXXMhMPhcQ46dse-XMP2t4VJCNG7O3Q,18335
 clarifai/runners/utils/openai_convertor.py,sha256=ZlIrvvfHttD_DavLvmKZdL8gNq_TQvQtZVnYamwdWz4,8248
@@ -107,9 +107,9 @@ clarifai/workflows/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuF
 clarifai/workflows/export.py,sha256=Oq3RVNKvv1iH46U6oIjXa-MXWJ4sTlXr_NSfwoxr3H4,2149
 clarifai/workflows/utils.py,sha256=ESL3INcouNcLKCh-nMpfXX-YbtCzX7tz7hT57_RGQ3M,2079
 clarifai/workflows/validate.py,sha256=UhmukyHkfxiMFrPPeBdUTiCOHQT5-shqivlBYEyKTlU,2931
-clarifai-11.
-clarifai-11.
-clarifai-11.
-clarifai-11.
-clarifai-11.
-clarifai-11.
+clarifai-11.5.0.dist-info/licenses/LICENSE,sha256=mUqF_d12-qE2n41g7C5_sq-BMLOcj6CNN-jevr15YHU,555
+clarifai-11.5.0.dist-info/METADATA,sha256=nHyy0pNlitK61Ym0VVAxz-Ev7UEtqpjyKrpbHwc2jmQ,22682
+clarifai-11.5.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+clarifai-11.5.0.dist-info/entry_points.txt,sha256=X9FZ4Z-i_r2Ud1RpZ9sNIFYuu_-9fogzCMCRUD9hyX0,51
+clarifai-11.5.0.dist-info/top_level.txt,sha256=wUMdCQGjkxaynZ6nZ9FAnvBUCgp5RJUVFSy2j-KYo0s,9
+clarifai-11.5.0.dist-info/RECORD,,

{clarifai-11.4.9.dist-info → clarifai-11.5.0.dist-info}/WHEEL
File without changes

{clarifai-11.4.9.dist-info → clarifai-11.5.0.dist-info}/entry_points.txt
File without changes

{clarifai-11.4.9.dist-info → clarifai-11.5.0.dist-info}/licenses/LICENSE
File without changes

{clarifai-11.4.9.dist-info → clarifai-11.5.0.dist-info}/top_level.txt
File without changes