clarifai 11.4.4__py3-none-any.whl → 11.4.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clarifai/__init__.py +1 -1
- clarifai/runners/__init__.py +2 -0
- clarifai/runners/models/dummy_openai_model.py +197 -0
- clarifai/runners/models/mcp_class.py +16 -7
- clarifai/runners/models/openai_class.py +219 -0
- clarifai/runners/utils/code_script.py +1 -4
- {clarifai-11.4.4.dist-info → clarifai-11.4.5.dist-info}/METADATA +1 -1
- {clarifai-11.4.4.dist-info → clarifai-11.4.5.dist-info}/RECORD +12 -10
- {clarifai-11.4.4.dist-info → clarifai-11.4.5.dist-info}/WHEEL +1 -1
- {clarifai-11.4.4.dist-info → clarifai-11.4.5.dist-info}/entry_points.txt +0 -0
- {clarifai-11.4.4.dist-info → clarifai-11.4.5.dist-info}/licenses/LICENSE +0 -0
- {clarifai-11.4.4.dist-info → clarifai-11.4.5.dist-info}/top_level.txt +0 -0
clarifai/__init__.py
CHANGED
@@ -1 +1 @@
-__version__ = "11.4.4"
+__version__ = "11.4.5"
clarifai/runners/__init__.py
CHANGED
@@ -1,9 +1,11 @@
 from .models.model_builder import ModelBuilder
 from .models.model_class import ModelClass
 from .models.model_runner import ModelRunner
+from .models.openai_class import OpenAIModelClass
 
 __all__ = [
     "ModelRunner",
     "ModelBuilder",
     "ModelClass",
+    "OpenAIModelClass",
 ]
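With the new export, downstream code can import the base class straight from `clarifai.runners`. A minimal sketch of what that enables; the subclass, base URL, and model id here are illustrative, not part of the diff:

```python
from openai import OpenAI

from clarifai.runners import OpenAIModelClass  # new top-level export in 11.4.5


# Hypothetical wrapper around a local OpenAI-compatible server (e.g. vLLM).
class MyLocalModel(OpenAIModelClass):
    openai_client = OpenAI(base_url="http://localhost:8000/v1", api_key="unused")
    model_name = "my-model"
```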
clarifai/runners/models/dummy_openai_model.py
ADDED

@@ -0,0 +1,197 @@
+"""Dummy OpenAI model implementation for testing."""
+
+import json
+from typing import Any, Dict, Iterator
+
+from clarifai.runners.models.openai_class import OpenAIModelClass
+
+
+class MockOpenAIClient:
+    """Mock OpenAI client for testing."""
+
+    class Completions:
+        def create(self, **kwargs):
+            """Mock create method for compatibility."""
+            if kwargs.get("stream", False):
+                return MockCompletionStream(kwargs.get("messages", []))
+            else:
+                return MockCompletion(kwargs.get("messages", []))
+
+    def __init__(self):
+        self.chat = self  # Make self.chat point to self for compatibility
+        self.completions = self.Completions()  # For compatibility with some clients
+
+
+class MockCompletion:
+    """Mock completion object that mimics the OpenAI completion response structure."""
+
+    class Choice:
+        class Message:
+            def __init__(self, content):
+                self.content = content
+                self.role = "assistant"
+
+        def __init__(self, content):
+            self.message = self.Message(content)
+            self.finish_reason = "stop"
+            self.index = 0
+
+    def __init__(self, messages):
+        # Generate a simple response based on the last message
+        last_message = messages[-1] if messages else {"content": ""}
+        response_text = f"Echo: {last_message.get('content', '')}"
+
+        self.choices = [self.Choice(response_text)]
+        self.usage = {
+            "prompt_tokens": len(str(messages)),
+            "completion_tokens": len(response_text),
+            "total_tokens": len(str(messages)) + len(response_text),
+        }
+        self.id = "dummy-completion-id"
+        self.created = 1234567890
+        self.model = "dummy-model"
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert the completion object to a dictionary."""
+        return {
+            "id": self.id,
+            "created": self.created,
+            "model": self.model,
+            "choices": [
+                {
+                    "message": {"role": choice.message.role, "content": choice.message.content},
+                    "finish_reason": choice.finish_reason,
+                    "index": choice.index,
+                }
+                for choice in self.choices
+            ],
+            "usage": self.usage,
+        }
+
+
+class MockCompletionStream:
+    """Mock completion stream that mimics the OpenAI streaming response structure."""
+
+    class Chunk:
+        class Choice:
+            class Delta:
+                def __init__(self, content=None):
+                    self.content = content
+                    self.role = "assistant" if content is None else None
+
+            def __init__(self, content=None, include_usage=False):
+                self.delta = self.Delta(content)
+                self.finish_reason = None if content else "stop"
+                self.index = 0
+                self.usage = (
+                    {"prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15}
+                    if include_usage
+                    else None
+                )
+
+        def __init__(self, content=None, include_usage=False):
+            self.choices = [self.Choice(content, include_usage)]
+            self.id = "dummy-chunk-id"
+            self.created = 1234567890
+            self.model = "dummy-model"
+            self.usage = self.choices[0].usage
+
+        def to_dict(self) -> Dict[str, Any]:
+            """Convert the chunk to a dictionary."""
+            result = {
+                "id": self.id,
+                "created": self.created,
+                "model": self.model,
+                "choices": [
+                    {
+                        "delta": {"role": choice.delta.role, "content": choice.delta.content}
+                        if choice.delta.content is not None
+                        else {"role": choice.delta.role},
+                        "finish_reason": choice.finish_reason,
+                        "index": choice.index,
+                    }
+                    for choice in self.choices
+                ],
+            }
+            if self.usage:
+                result["usage"] = self.usage
+            return result
+
+    def __init__(self, messages):
+        # Generate a simple response based on the last message
+        last_message = messages[-1] if messages else {"content": ""}
+        self.response_text = f"Echo: {last_message.get('content', '')}"
+        # Create chunks that ensure the full text is included in the first chunk
+        self.chunks = [
+            self.response_text,  # First chunk contains the full text
+            "",  # Final chunk is empty to indicate completion
+        ]
+        self.current_chunk = 0
+        self.include_usage = False
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        if self.current_chunk < len(self.chunks):
+            chunk = self.Chunk(self.chunks[self.current_chunk], self.include_usage)
+            self.current_chunk += 1
+            return chunk
+        else:
+            raise StopIteration
+
+
+class DummyOpenAIModel(OpenAIModelClass):
+    """Dummy OpenAI model implementation for testing."""
+
+    openai_client = MockOpenAIClient()
+    model_name = "dummy-model"
+
+    def _process_request(self, **kwargs) -> Dict[str, Any]:
+        """Process a request for non-streaming responses."""
+        completion_args = self._create_completion_args(kwargs)
+        return self.client.chat.completions.create(**completion_args).to_dict()
+
+    def _process_streaming_request(self, **kwargs) -> Iterator[Dict[str, Any]]:
+        """Process a request for streaming responses."""
+        completion_args = self._create_completion_args(kwargs, stream=True)
+        completion_stream = self.client.chat.completions.create(**completion_args)
+        completion_stream.include_usage = kwargs.get('stream_options', {}).get(
+            'include_usage', False
+        )
+
+        for chunk in completion_stream:
+            yield chunk.to_dict()
+
+    # Override the method directly for testing
+    @OpenAIModelClass.method
+    def openai_stream_transport(self, req: str) -> Iterator[str]:
+        """Direct implementation for testing purposes."""
+        try:
+            request_data = json.loads(req)
+            params = self._extract_request_params(request_data)
+
+            # Validate messages
+            if not params.get("messages"):
+                yield "Error: No messages provided"
+                return
+
+            for message in params["messages"]:
+                if (
+                    not isinstance(message, dict)
+                    or "role" not in message
+                    or "content" not in message
+                ):
+                    yield "Error: Invalid message format"
+                    return
+
+            for chunk in self._process_streaming_request(**params):
+                yield json.dumps(chunk)
+        except Exception as e:
+            yield f"Error: {str(e)}"
+
+    # Additional example method that could be added for specific model implementations
+    @OpenAIModelClass.method
+    def test_method(self, prompt: str) -> str:
+        """Test method that simply echoes the input."""
+        return f"Test: {prompt}"
clarifai/runners/models/mcp_class.py
CHANGED

@@ -102,13 +102,22 @@ class MCPModelClass(ModelClass):
         # If we have an id it's a JSONRPCRequest
         if not d.get('method', '').startswith("notifications/"):
             client_message = types.ClientRequest.model_validate(d)
+            # Note(zeiler): this response is the "result" field of the JSONRPCResponse.
+            # the API will fill in the "id" and "jsonrpc" fields.
             response = asyncio.run(send_request(client_message, id=id))
+            if response is None:
+                response = types.JSONRPCError(
+                    jsonrpc="2.0",
+                    id=id,
+                    error=types.ErrorData(
+                        code=types.INTERNAL_ERROR, message="Got empty response from MCP server."
+                    ),
+                )
+            # return as a serialized json string
+            res = response.model_dump_json(by_alias=True, exclude_none=True)
+            return res
         else:  # JSONRPCRequest
             client_message = types.ClientNotification.model_validate(d)
-            response = asyncio.run(send_notification(client_message))
-        if response is None:
-            response = types.JSONRPCError(
-                jsonrpc="2.0", id=id, error="Got empty response from MCP server."
-            )
-        # return as a serialized json string
-        return response.model_dump_json(by_alias=True, exclude_none=True)
+            # send_notification returns None always so nothing to return.
+            asyncio.run(send_notification(client_message))
+            return "{}"
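The new branch replaces a crash-prone path: `send_request` can return None, and the old code built `types.JSONRPCError` with a bare string for `error`, which the pydantic model would reject. A sketch of the fallback object the fixed code serializes, assuming the `mcp` package's `types` module as imported by mcp_class.py (the request id is illustrative):

```python
from mcp import types

# The fallback built when the MCP server returns an empty response.
response = types.JSONRPCError(
    jsonrpc="2.0",
    id=1,
    error=types.ErrorData(
        code=types.INTERNAL_ERROR,  # standard JSON-RPC internal error, -32603
        message="Got empty response from MCP server.",
    ),
)

# Serialized the same way mcp_class.py does before returning it to the API.
print(response.model_dump_json(by_alias=True, exclude_none=True))
```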
clarifai/runners/models/openai_class.py
ADDED

@@ -0,0 +1,219 @@
+"""Base class for creating OpenAI-compatible API server."""
+
+import json
+from typing import Any, Dict, Iterator
+
+from clarifai.runners.models.model_class import ModelClass
+
+
+class OpenAIModelClass(ModelClass):
+    """Base class for wrapping OpenAI-compatible servers as a model running in Clarifai.
+    This handles all the transport between the API and the OpenAI-compatible server.
+
+    To use this class, create a subclass and set the following class attributes:
+    - openai_client: The OpenAI-compatible client instance
+    - model_name: The name of the model to use with the client
+
+    Example:
+        class MyOpenAIModel(OpenAIModelClass):
+            openai_client = OpenAI(api_key="your-key")
+            model_name = "gpt-4"
+    """
+
+    # These should be overridden in subclasses
+    openai_client = None
+    model_name = None
+
+    def __init__(self) -> None:
+        if self.openai_client is None:
+            raise NotImplementedError("Subclasses must set the 'openai_client' class attribute")
+        if self.model_name is None:
+            self.model_name = self.openai_client.models.list().data[0].id
+
+        self.client = self.openai_client
+        self.model = self.model_name
+
+    def _extract_request_params(self, request_data: Dict[str, Any]) -> Dict[str, Any]:
+        """Extract and validate common openai arguments parameters from the request data.
+
+        Args:
+            request_data: The parsed JSON request data
+
+        Returns:
+            Dict containing the extracted parameters
+        """
+        return {
+            "messages": request_data.get("messages", []),
+            "temperature": request_data.get("temperature", 1.0),
+            "max_tokens": request_data.get("max_tokens"),
+            "max_completion_tokens": request_data.get("max_completion_tokens"),
+            "n": request_data.get("n", 1),
+            "frequency_penalty": request_data.get("frequency_penalty"),
+            "presence_penalty": request_data.get("presence_penalty"),
+            "top_p": request_data.get("top_p", 1.0),
+            "reasoning_effort": request_data.get("reasoning_effort"),
+            "response_format": request_data.get("response_format"),
+            "stop": request_data.get("stop"),
+            "tools": request_data.get("tools"),
+            "tool_choice": request_data.get("tool_choice"),
+            "tool_resources": request_data.get("tool_resources"),
+            "modalities": request_data.get("modalities"),
+            "stream_options": request_data.get("stream_options", {"include_usage": True}),
+        }
+
+    def _create_completion_args(
+        self, params: Dict[str, Any], stream: bool = False
+    ) -> Dict[str, Any]:
+        """Create the completion arguments dictionary from parameters.
+
+        Args:
+            params: Dictionary of parameters extracted from request
+            stream: Whether this is a streaming request
+
+        Returns:
+            Dict containing the completion arguments
+        """
+        completion_args = {
+            "model": self.model,
+            "messages": params["messages"],
+            "temperature": params["temperature"],
+        }
+
+        if stream:
+            completion_args["stream"] = True
+            if params.get("stream_options"):
+                completion_args["stream_options"] = params["stream_options"]
+
+        # Add optional parameters if they exist
+        optional_params = [
+            "max_tokens",
+            "max_completion_tokens",
+            "n",
+            "frequency_penalty",
+            "presence_penalty",
+            "top_p",
+            "reasoning_effort",
+            "response_format",
+            "stop",
+            "tools",
+            "tool_choice",
+            "tool_resources",
+            "modalities",
+        ]
+
+        for param in optional_params:
+            if params.get(param) is not None:
+                completion_args[param] = params[param]
+
+        return completion_args
+
+    def _format_error_response(self, error: Exception) -> str:
+        """Format an error response in OpenAI-compatible format.
+
+        Args:
+            error: The exception that occurred
+
+        Returns:
+            JSON string containing the error response
+        """
+        error_response = {
+            "error": {
+                "message": str(error),
+                "type": "InvalidRequestError",
+                "code": "invalid_request_error",
+            }
+        }
+        return json.dumps(error_response)
+
+    @ModelClass.method
+    def openai_transport(self, msg: str) -> str:
+        """The single model method to get the OpenAI-compatible request and send it to the OpenAI server
+        then return its response.
+
+        Args:
+            msg: JSON string containing the request parameters
+
+        Returns:
+            JSON string containing the response or error
+        """
+        try:
+            request_data = json.loads(msg)
+            params = self._extract_request_params(request_data)
+            stream = request_data.get("stream", False)
+
+            if stream:
+                chunks = self._process_streaming_request(**params)
+                response_list = []
+                for chunk in chunks:
+                    response_list.append(chunk)
+                return json.dumps(response_list)
+            else:
+                completion = self._process_request(**params)
+                if completion.get('usage'):
+                    if completion['usage'].get('prompt_tokens') and completion['usage'].get(
+                        'completion_tokens'
+                    ):
+                        self.set_output_context(
+                            prompt_tokens=completion['usage']['prompt_tokens'],
+                            completion_tokens=completion['usage']['completion_tokens'],
+                        )
+
+                return json.dumps(completion)
+
+        except Exception as e:
+            return self._format_error_response(e)
+
+    @ModelClass.method
+    def openai_stream_transport(self, msg: str) -> Iterator[str]:
+        """Process an OpenAI-compatible request and return a streaming response iterator.
+        This method is used when stream=True and returns an iterator of strings directly,
+        without converting to a list or JSON serializing.
+
+        Args:
+            msg: The request as a JSON string.
+
+        Returns:
+            Iterator[str]: An iterator yielding text chunks from the streaming response.
+        """
+        try:
+            request_data = json.loads(msg)
+            params = self._extract_request_params(request_data)
+            for chunk in self._process_streaming_request(**params):
+                if chunk.get('usage'):
+                    if chunk['usage'].get('prompt_tokens') and chunk['usage'].get(
+                        'completion_tokens'
+                    ):
+                        self.set_output_context(
+                            prompt_tokens=chunk['usage']['prompt_tokens'],
+                            completion_tokens=chunk['usage']['completion_tokens'],
+                        )
+                yield json.dumps(chunk)
+        except Exception as e:
+            yield f"Error: {str(e)}"
+
+    def _process_request(self, **kwargs) -> Any:
+        """Process a standard (non-streaming) request using the OpenAI client.
+
+        Args:
+            **kwargs: Request parameters
+
+        Returns:
+            The completion response from the OpenAI client
+        """
+        completion_args = self._create_completion_args(kwargs)
+        return self.client.chat.completions.create(**completion_args).to_dict()
+
+    def _process_streaming_request(self, **kwargs) -> Iterator[str]:
+        """Process a streaming request using the OpenAI client.
+
+        Args:
+            **kwargs: Request parameters
+
+        Returns:
+            Iterator yielding response chunks
+        """
+        completion_args = self._create_completion_args(kwargs, stream=True)
+        completion_stream = self.client.chat.completions.create(**completion_args)
+
+        for chunk in completion_stream:
+            yield chunk.to_dict()
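Per the docstring, a subclass only supplies the client and the model name; the two decorated transport methods take care of JSON parsing, parameter defaults, and usage accounting. A minimal sketch following the docstring's own example, assuming decorated methods stay directly callable in-process (the API key is a placeholder):

```python
import json

from openai import OpenAI

from clarifai.runners.models.openai_class import OpenAIModelClass


class MyOpenAIModel(OpenAIModelClass):
    openai_client = OpenAI(api_key="your-key")
    model_name = "gpt-4"


# openai_transport takes the whole chat-completions request as one JSON string
# and returns the completion (or an OpenAI-style error object) as a JSON string.
model = MyOpenAIModel()
request = json.dumps(
    {"messages": [{"role": "user", "content": "Say hi"}], "temperature": 0.7}
)
print(json.loads(model.openai_transport(request)))
```

Note that with `"stream": true` the non-streaming `openai_transport` still collects every chunk into a list before returning; `openai_stream_transport` is the method that actually streams.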
clarifai/runners/utils/code_script.py
CHANGED

@@ -88,9 +88,6 @@ model = Model({model_ui_url},
     client_script_str = f'response = model.{method_name}('
     annotations = _get_annotations_source(method_signature)
     for param_name, (param_type, default_value, required) in annotations.items():
-        print(
-            f"param_name: {param_name}, param_type: {param_type}, default_value: {default_value}"
-        )
         if param_name == "return":
             continue
         if default_value is None and required:
@@ -262,7 +259,7 @@ def _parse_default_value(field: resources_pb2.ModelTypeField):
     elif data_type == resources_pb2.ModelTypeField.DataType.BOOL:
         return 'True' if default_str.lower() == 'true' else 'False'
     elif data_type == resources_pb2.ModelTypeField.DataType.STR:
-        return
+        return default_str
     elif data_type == resources_pb2.ModelTypeField.DataType.BYTES:
         return f"b{repr(default_str.encode('utf-8'))}"
    elif data_type == resources_pb2.ModelTypeField.DataType.JSON_DATA:
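The first hunk simply drops a leftover debug print. The second is a behavior fix: the STR branch ended in a bare `return` (as rendered here), so string defaults surfaced as `None` in generated client snippets, while 11.4.5 returns the string itself. A standalone sketch of the before and after, simplified to skip the protobuf field types:

```python
# Simplified stand-ins for _parse_default_value's STR branch.
def parse_str_default_11_4_4(default_str: str):
    return  # bare return: caller receives None


def parse_str_default_11_4_5(default_str: str):
    return default_str  # the actual default string


print(parse_str_default_11_4_4("hello"))  # None
print(parse_str_default_11_4_5("hello"))  # hello
```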
{clarifai-11.4.4.dist-info → clarifai-11.4.5.dist-info}/RECORD
CHANGED

@@ -1,4 +1,4 @@
-clarifai/__init__.py,sha256=
+clarifai/__init__.py,sha256=kV7nbYrWnc8FLLQtKI_7asjBj6k3ELmICuzhSmhU4Mk,23
 clarifai/cli.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clarifai/errors.py,sha256=GXa6D4v_L404J83jnRNFPH7s-1V9lk7w6Ws99f1g-AY,2772
 clarifai/versions.py,sha256=ecSuEB_nOL2XSoYHDw2n23XUbm_KPOGjudMXmQrGdS8,224
@@ -63,20 +63,22 @@ clarifai/modules/style.css,sha256=j7FNPZVhLPj35vvBksAJ90RuX5sLuqzDR5iM2WIEhiA,60
 clarifai/rag/__init__.py,sha256=wu3PzAzo7uqgrEzuaC9lY_3gj1HFiR3GU3elZIKTT5g,40
 clarifai/rag/rag.py,sha256=EG3GoFrHFCmA70Tz49_0Jo1-3WIaHSgWGHecPeErcdc,14170
 clarifai/rag/utils.py,sha256=_gVZdABuMnraCKViLruV75x0F3IpgFXN6amYSGE5_xc,4462
-clarifai/runners/__init__.py,sha256=
+clarifai/runners/__init__.py,sha256=CQhpUOj_x-oV9xEUKdL-hi3A1BQAtPUv-FFOev4a96w,281
 clarifai/runners/server.py,sha256=9qVAs8pRHmtyY0RCNIQ1uP8nqDADIFZ03LnkoDt1h4U,4692
 clarifai/runners/dockerfile_template/Dockerfile.template,sha256=5cjv7U8PmWa3DB_5B1CqSYh_6GE0E0np52TIAa7EIDE,2312
 clarifai/runners/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-clarifai/runners/models/
+clarifai/runners/models/dummy_openai_model.py,sha256=nbE6ZnLDUfor9omN536jMk64ZOi5OnwJqgvI93wTKqY,7220
+clarifai/runners/models/mcp_class.py,sha256=7uwCMade0LMMBq7vczhPf4Kxdmh8Rj0R7Pg3pPxYdjQ,6386
 clarifai/runners/models/model_builder.py,sha256=PiqPyTGPSKsYvOQNpBzs4e1_wuEbtE-P3yEkLE4Py10,49231
 clarifai/runners/models/model_class.py,sha256=OHVd0tMOXDyl9v1vWeHOmYGx_dvP77N4zlLGMyTakag,15575
 clarifai/runners/models/model_run_locally.py,sha256=6-6WjEKc0ba3gAv4wOLdMs2XOzS3b-2bZHJS0wdVqJY,20088
 clarifai/runners/models/model_runner.py,sha256=SccX-RxTgruSpQaM21uMSl-z1x6fOa13fQZMQW8NNRY,7297
 clarifai/runners/models/model_servicer.py,sha256=rRd_fNEXwqiBSzTUtPI2r07EBdcCPd8tcSPHeqTe0_I,3445
+clarifai/runners/models/openai_class.py,sha256=bD5Th_Pgu8RRP0OsraWClDUtbrB9-bygfIBRpzWfOac,8074
 clarifai/runners/models/visual_classifier_class.py,sha256=f9ZP8KFamMUdMpUG3AlL9nVCdcggy_E5n9RJY3ixR1U,2739
 clarifai/runners/models/visual_detector_class.py,sha256=ky4oFAkGCKPpGPdgaOso-n6D3HcmnbKee_8hBsNiV8U,2883
 clarifai/runners/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-clarifai/runners/utils/code_script.py,sha256=
+clarifai/runners/utils/code_script.py,sha256=u8rN-Av7V8ZZK61P6wu6WOFkUDjU2gc0B8Z53Y6FEWQ,11137
 clarifai/runners/utils/const.py,sha256=Q4Ps6gIEJCyTdQCfmT6PaS61WHmhT25XigV1NugWz-E,1544
 clarifai/runners/utils/data_utils.py,sha256=4M4n-cGprBEBV5UkgOWaUlVfZ3WBTmegdffGQ3SfYCU,20750
 clarifai/runners/utils/loader.py,sha256=K5Y8MPbIe5STw2gDnrL8KqFgKNxEo7bz-RV0ip1T4PM,10900
@@ -104,9 +106,9 @@ clarifai/workflows/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuF
 clarifai/workflows/export.py,sha256=Oq3RVNKvv1iH46U6oIjXa-MXWJ4sTlXr_NSfwoxr3H4,2149
 clarifai/workflows/utils.py,sha256=ESL3INcouNcLKCh-nMpfXX-YbtCzX7tz7hT57_RGQ3M,2079
 clarifai/workflows/validate.py,sha256=UhmukyHkfxiMFrPPeBdUTiCOHQT5-shqivlBYEyKTlU,2931
-clarifai-11.4.
-clarifai-11.4.
-clarifai-11.4.
-clarifai-11.4.
-clarifai-11.4.
-clarifai-11.4.
+clarifai-11.4.5.dist-info/licenses/LICENSE,sha256=mUqF_d12-qE2n41g7C5_sq-BMLOcj6CNN-jevr15YHU,555
+clarifai-11.4.5.dist-info/METADATA,sha256=5wUGZgaAVSm1sGoBaaBp8Lq6shX5SrkHuYkAhSND000,22398
+clarifai-11.4.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+clarifai-11.4.5.dist-info/entry_points.txt,sha256=X9FZ4Z-i_r2Ud1RpZ9sNIFYuu_-9fogzCMCRUD9hyX0,51
+clarifai-11.4.5.dist-info/top_level.txt,sha256=wUMdCQGjkxaynZ6nZ9FAnvBUCgp5RJUVFSy2j-KYo0s,9
+clarifai-11.4.5.dist-info/RECORD,,
{clarifai-11.4.4.dist-info → clarifai-11.4.5.dist-info}/entry_points.txt
File without changes

{clarifai-11.4.4.dist-info → clarifai-11.4.5.dist-info}/licenses/LICENSE
File without changes

{clarifai-11.4.4.dist-info → clarifai-11.4.5.dist-info}/top_level.txt
File without changes