clarifai 11.4.9.tar.gz → 11.5.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (136)
  1. {clarifai-11.4.9/clarifai.egg-info → clarifai-11.5.0}/PKG-INFO +3 -3
  2. clarifai-11.5.0/clarifai/__init__.py +1 -0
  3. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/runners/models/dummy_openai_model.py +59 -26
  4. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/runners/models/model_builder.py +1 -0
  5. clarifai-11.5.0/clarifai/runners/models/openai_class.py +167 -0
  6. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/runners/models/visual_classifier_class.py +1 -1
  7. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/runners/utils/code_script.py +3 -1
  8. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/runners/utils/data_utils.py +7 -1
  9. {clarifai-11.4.9 → clarifai-11.5.0/clarifai.egg-info}/PKG-INFO +3 -3
  10. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai.egg-info/requires.txt +2 -2
  11. {clarifai-11.4.9 → clarifai-11.5.0}/requirements.txt +2 -2
  12. clarifai-11.4.9/clarifai/__init__.py +0 -1
  13. clarifai-11.4.9/clarifai/runners/models/openai_class.py +0 -221
  14. {clarifai-11.4.9 → clarifai-11.5.0}/LICENSE +0 -0
  15. {clarifai-11.4.9 → clarifai-11.5.0}/MANIFEST.in +0 -0
  16. {clarifai-11.4.9 → clarifai-11.5.0}/README.md +0 -0
  17. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/cli/README.md +0 -0
  18. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/cli/__init__.py +0 -0
  19. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/cli/__main__.py +0 -0
  20. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/cli/base.py +0 -0
  21. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/cli/compute_cluster.py +0 -0
  22. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/cli/deployment.py +0 -0
  23. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/cli/model.py +0 -0
  24. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/cli/model_templates.py +0 -0
  25. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/cli/nodepool.py +0 -0
  26. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/cli.py +0 -0
  27. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/client/__init__.py +0 -0
  28. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/client/app.py +0 -0
  29. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/client/auth/__init__.py +0 -0
  30. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/client/auth/helper.py +0 -0
  31. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/client/auth/register.py +0 -0
  32. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/client/auth/stub.py +0 -0
  33. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/client/base.py +0 -0
  34. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/client/compute_cluster.py +0 -0
  35. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/client/dataset.py +0 -0
  36. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/client/deployment.py +0 -0
  37. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/client/input.py +0 -0
  38. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/client/lister.py +0 -0
  39. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/client/model.py +0 -0
  40. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/client/model_client.py +0 -0
  41. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/client/module.py +0 -0
  42. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/client/nodepool.py +0 -0
  43. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/client/runner.py +0 -0
  44. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/client/search.py +0 -0
  45. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/client/user.py +0 -0
  46. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/client/workflow.py +0 -0
  47. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/constants/base.py +0 -0
  48. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/constants/dataset.py +0 -0
  49. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/constants/input.py +0 -0
  50. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/constants/model.py +0 -0
  51. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/constants/rag.py +0 -0
  52. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/constants/search.py +0 -0
  53. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/constants/workflow.py +0 -0
  54. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/datasets/__init__.py +0 -0
  55. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/datasets/export/__init__.py +0 -0
  56. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/datasets/export/inputs_annotations.py +0 -0
  57. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/datasets/upload/__init__.py +0 -0
  58. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/datasets/upload/base.py +0 -0
  59. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/datasets/upload/features.py +0 -0
  60. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/datasets/upload/image.py +0 -0
  61. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/datasets/upload/loaders/README.md +0 -0
  62. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/datasets/upload/loaders/__init__.py +0 -0
  63. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/datasets/upload/loaders/coco_captions.py +0 -0
  64. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/datasets/upload/loaders/coco_detection.py +0 -0
  65. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/datasets/upload/loaders/imagenet_classification.py +0 -0
  66. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/datasets/upload/loaders/xview_detection.py +0 -0
  67. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/datasets/upload/multimodal.py +0 -0
  68. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/datasets/upload/text.py +0 -0
  69. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/datasets/upload/utils.py +0 -0
  70. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/errors.py +0 -0
  71. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/models/__init__.py +0 -0
  72. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/models/api.py +0 -0
  73. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/modules/README.md +0 -0
  74. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/modules/__init__.py +0 -0
  75. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/modules/css.py +0 -0
  76. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/modules/pages.py +0 -0
  77. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/modules/style.css +0 -0
  78. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/rag/__init__.py +0 -0
  79. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/rag/rag.py +0 -0
  80. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/rag/utils.py +0 -0
  81. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/runners/__init__.py +0 -0
  82. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/runners/dockerfile_template/Dockerfile.template +0 -0
  83. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/runners/models/__init__.py +0 -0
  84. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/runners/models/mcp_class.py +0 -0
  85. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/runners/models/model_class.py +0 -0
  86. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/runners/models/model_run_locally.py +0 -0
  87. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/runners/models/model_runner.py +0 -0
  88. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/runners/models/model_servicer.py +0 -0
  89. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/runners/models/visual_detector_class.py +0 -0
  90. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/runners/server.py +0 -0
  91. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/runners/utils/__init__.py +0 -0
  92. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/runners/utils/const.py +0 -0
  93. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/runners/utils/data_types/__init__.py +0 -0
  94. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/runners/utils/data_types/data_types.py +0 -0
  95. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/runners/utils/loader.py +0 -0
  96. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/runners/utils/method_signatures.py +0 -0
  97. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/runners/utils/openai_convertor.py +0 -0
  98. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/runners/utils/serializers.py +0 -0
  99. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/runners/utils/url_fetcher.py +0 -0
  100. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/schema/search.py +0 -0
  101. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/urls/helper.py +0 -0
  102. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/utils/__init__.py +0 -0
  103. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/utils/cli.py +0 -0
  104. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/utils/config.py +0 -0
  105. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/utils/constants.py +0 -0
  106. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/utils/evaluation/__init__.py +0 -0
  107. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/utils/evaluation/helpers.py +0 -0
  108. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/utils/evaluation/main.py +0 -0
  109. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/utils/evaluation/testset_annotation_parser.py +0 -0
  110. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/utils/logging.py +0 -0
  111. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/utils/misc.py +0 -0
  112. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/utils/model_train.py +0 -0
  113. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/utils/protobuf.py +0 -0
  114. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/versions.py +0 -0
  115. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/workflows/__init__.py +0 -0
  116. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/workflows/export.py +0 -0
  117. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/workflows/utils.py +0 -0
  118. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai/workflows/validate.py +0 -0
  119. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai.egg-info/SOURCES.txt +0 -0
  120. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai.egg-info/dependency_links.txt +0 -0
  121. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai.egg-info/entry_points.txt +0 -0
  122. {clarifai-11.4.9 → clarifai-11.5.0}/clarifai.egg-info/top_level.txt +0 -0
  123. {clarifai-11.4.9 → clarifai-11.5.0}/pyproject.toml +0 -0
  124. {clarifai-11.4.9 → clarifai-11.5.0}/setup.cfg +0 -0
  125. {clarifai-11.4.9 → clarifai-11.5.0}/setup.py +0 -0
  126. {clarifai-11.4.9 → clarifai-11.5.0}/tests/test_app.py +0 -0
  127. {clarifai-11.4.9 → clarifai-11.5.0}/tests/test_auth.py +0 -0
  128. {clarifai-11.4.9 → clarifai-11.5.0}/tests/test_data_upload.py +0 -0
  129. {clarifai-11.4.9 → clarifai-11.5.0}/tests/test_eval.py +0 -0
  130. {clarifai-11.4.9 → clarifai-11.5.0}/tests/test_misc.py +0 -0
  131. {clarifai-11.4.9 → clarifai-11.5.0}/tests/test_model_predict.py +0 -0
  132. {clarifai-11.4.9 → clarifai-11.5.0}/tests/test_model_train.py +0 -0
  133. {clarifai-11.4.9 → clarifai-11.5.0}/tests/test_modules.py +0 -0
  134. {clarifai-11.4.9 → clarifai-11.5.0}/tests/test_rag.py +0 -0
  135. {clarifai-11.4.9 → clarifai-11.5.0}/tests/test_search.py +0 -0
  136. {clarifai-11.4.9 → clarifai-11.5.0}/tests/test_stub.py +0 -0
{clarifai-11.4.9/clarifai.egg-info → clarifai-11.5.0}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: clarifai
-Version: 11.4.9
+Version: 11.5.0
 Home-page: https://github.com/Clarifai/clarifai-python
 Author: Clarifai
 Author-email: support@clarifai.com
@@ -19,8 +19,8 @@ Classifier: Operating System :: OS Independent
 Requires-Python: >=3.8
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: clarifai-grpc>=11.3.4
-Requires-Dist: clarifai-protocol>=0.0.23
+Requires-Dist: clarifai-grpc>=11.5.5
+Requires-Dist: clarifai-protocol>=0.0.24
 Requires-Dist: numpy>=1.22.0
 Requires-Dist: tqdm>=4.65.0
 Requires-Dist: PyYAML>=6.0.1
clarifai-11.5.0/clarifai/__init__.py
@@ -0,0 +1 @@
+__version__ = "11.5.0"
{clarifai-11.4.9 → clarifai-11.5.0}/clarifai/runners/models/dummy_openai_model.py
@@ -13,9 +13,9 @@ class MockOpenAIClient:
     def create(self, **kwargs):
         """Mock create method for compatibility."""
         if kwargs.get("stream", False):
-            return MockCompletionStream(kwargs.get("messages", []))
+            return MockCompletionStream(**kwargs)
         else:
-            return MockCompletion(kwargs.get("messages", []))
+            return MockCompletion(**kwargs)
 
     def __init__(self):
         self.chat = self  # Make self.chat point to self for compatibility
@@ -25,6 +25,19 @@ class MockOpenAIClient:
 class MockCompletion:
     """Mock completion object that mimics the OpenAI completion response structure."""
 
+    class Usage:
+        def __init__(self, prompt_tokens, completion_tokens, total_tokens):
+            self.total_tokens = total_tokens
+            self.prompt_tokens = prompt_tokens
+            self.completion_tokens = completion_tokens
+
+        def to_dict(self):
+            return dict(
+                total_tokens=self.total_tokens,
+                prompt_tokens=self.prompt_tokens,
+                completion_tokens=self.completion_tokens,
+            )
+
     class Choice:
         class Message:
             def __init__(self, content):
@@ -36,17 +49,21 @@ class MockCompletion:
             self.finish_reason = "stop"
             self.index = 0
 
-    def __init__(self, messages):
+    def __init__(self, **kwargs):
         # Generate a simple response based on the last message
+        messages = kwargs.get("messages")
         last_message = messages[-1] if messages else {"content": ""}
         response_text = f"Echo: {last_message.get('content', '')}"
 
         self.choices = [self.Choice(response_text)]
-        self.usage = {
-            "prompt_tokens": len(str(messages)),
-            "completion_tokens": len(response_text),
-            "total_tokens": len(str(messages)) + len(response_text),
-        }
+        self.usage = self.Usage(
+            **{
+                "prompt_tokens": len(str(messages)),
+                "completion_tokens": len(response_text),
+                "total_tokens": len(str(messages)) + len(response_text),
+            }
+        )
+
         self.id = "dummy-completion-id"
         self.created = 1234567890
         self.model = "dummy-model"
@@ -65,9 +82,12 @@ class MockCompletion:
                 }
                 for choice in self.choices
             ],
-            "usage": self.usage,
+            "usage": self.usage.to_dict(),
         }
 
+    def model_dump(self):
+        return self.to_dict()
+
 
 class MockCompletionStream:
     """Mock completion stream that mimics the OpenAI streaming response structure."""
@@ -79,14 +99,27 @@ class MockCompletionStream:
                     self.content = content
                     self.role = "assistant" if content is None else None
 
+            class Usage:
+                def __init__(self, prompt_tokens, completion_tokens, total_tokens):
+                    self.total_tokens = total_tokens
+                    self.prompt_tokens = prompt_tokens
+                    self.completion_tokens = completion_tokens
+
+                def to_dict(self):
+                    return dict(
+                        total_tokens=self.total_tokens,
+                        prompt_tokens=self.prompt_tokens,
+                        completion_tokens=self.completion_tokens,
+                    )
+
             def __init__(self, content=None, include_usage=False):
                 self.delta = self.Delta(content)
                 self.finish_reason = None if content else "stop"
                 self.index = 0
                 self.usage = (
-                    {"prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15}
+                    self.Usage(**{"prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15})
                     if include_usage
-                    else None
+                    else self.Usage(None, None, None)
                 )
 
         def __init__(self, content=None, include_usage=False):
@@ -114,11 +147,16 @@ class MockCompletionStream:
                 ],
             }
             if self.usage:
-                result["usage"] = self.usage
+                result["usage"] = self.usage.to_dict()
             return result
 
-    def __init__(self, messages):
+        def model_dump(self):
+            return self.to_dict()
+
+    def __init__(self, **kwargs):
         # Generate a simple response based on the last message
+        messages = kwargs.get("messages")
+
         last_message = messages[-1] if messages else {"content": ""}
         self.response_text = f"Echo: {last_message.get('content', '')}"
         # Create chunks that ensure the full text is included in the first chunk
@@ -127,7 +165,7 @@ class MockCompletionStream:
             "",  # Final chunk is empty to indicate completion
         ]
         self.current_chunk = 0
-        self.include_usage = False
+        self.include_usage = kwargs.get("stream_options", {}).get("include_usage")
 
     def __iter__(self):
         return self
@@ -150,18 +188,14 @@ class DummyOpenAIModel(OpenAIModelClass):
     def _process_request(self, **kwargs) -> Dict[str, Any]:
         """Process a request for non-streaming responses."""
        completion_args = self._create_completion_args(kwargs)
-        return self.client.chat.completions.create(**completion_args).to_dict()
+        return self.client.chat.completions.create(**completion_args).model_dump()
 
     def _process_streaming_request(self, **kwargs) -> Iterator[Dict[str, Any]]:
         """Process a request for streaming responses."""
-        completion_args = self._create_completion_args(kwargs, stream=True)
-        completion_stream = self.client.chat.completions.create(**completion_args)
-        completion_stream.include_usage = kwargs.get('stream_options', {}).get(
-            'include_usage', False
-        )
+        completion_stream = self.client.chat.completions.create(**kwargs)
 
         for chunk in completion_stream:
-            yield chunk.to_dict()
+            yield chunk.model_dump()
 
     # Override the method directly for testing
     @OpenAIModelClass.method
@@ -169,14 +203,13 @@ class DummyOpenAIModel(OpenAIModelClass):
         """Direct implementation for testing purposes."""
         try:
             request_data = json.loads(req)
-            params = self._extract_request_params(request_data)
-
+            request_data = self._create_completion_args(request_data)
             # Validate messages
-            if not params.get("messages"):
+            if not request_data.get("messages"):
                 yield "Error: No messages provided"
                 return
 
-            for message in params["messages"]:
+            for message in request_data["messages"]:
                 if (
                     not isinstance(message, dict)
                     or "role" not in message
@@ -185,7 +218,7 @@ class DummyOpenAIModel(OpenAIModelClass):
                     yield "Error: Invalid message format"
                     return
 
-            for chunk in self._process_streaming_request(**params):
+            for chunk in self._process_streaming_request(**request_data):
                 yield json.dumps(chunk)
         except Exception as e:
             yield f"Error: {str(e)}"
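Note: a minimal sketch of how the reworked mocks behave after this change. The constructor keywords mirror the diff above, but the snippet itself is illustrative and not shipped in the package; stream iteration relies on __next__ from the unchanged part of the file.

# Hypothetical in-process check of the mocks above.
completion = MockCompletion(messages=[{"role": "user", "content": "hi"}])
print(completion.model_dump()["usage"])  # token counts now come from the nested Usage class

# stream_options now flows in through **kwargs instead of being patched on after creation.
stream = MockCompletionStream(
    messages=[{"role": "user", "content": "hi"}],
    stream_options={"include_usage": True},
)
for chunk in stream:  # iteration assumes __next__ from the unchanged part of the file
    print(chunk.model_dump())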
{clarifai-11.4.9 → clarifai-11.5.0}/clarifai/runners/models/model_builder.py
@@ -839,6 +839,7 @@ class ModelBuilder:
     def get_model_version_proto(self):
         signatures = self.get_method_signatures()
         model_version_proto = resources_pb2.ModelVersion(
+            pretrained_model_config=resources_pb2.PretrainedModelConfig(),
             inference_compute_info=self.inference_compute_info,
             method_signatures=signatures,
         )
clarifai-11.5.0/clarifai/runners/models/openai_class.py
@@ -0,0 +1,167 @@
+"""Base class for creating OpenAI-compatible API server."""
+
+import json
+from typing import Any, Dict, Iterator
+
+from clarifai.runners.models.model_class import ModelClass
+
+
+class OpenAIModelClass(ModelClass):
+    """Base class for wrapping OpenAI-compatible servers as a model running in Clarifai.
+    This handles all the transport between the API and the OpenAI-compatible server.
+
+    To use this class, create a subclass and set the following class attributes:
+    - client: The OpenAI-compatible client instance
+    - model: The name of the model to use with the client
+
+    Example:
+        class MyOpenAIModel(OpenAIModelClass):
+            client = OpenAI(api_key="your-key")
+            model = "gpt-4"
+    """
+
+    # API Endpoints
+    ENDPOINT_CHAT_COMPLETIONS = "/chat/completions"
+    ENDPOINT_IMAGES_GENERATE = "/images/generations"
+    ENDPOINT_EMBEDDINGS = "/embeddings"
+    ENDPOINT_RESPONSES = "/responses"
+
+    # Default endpoint
+    DEFAULT_ENDPOINT = ENDPOINT_CHAT_COMPLETIONS
+
+    # These should be overridden in subclasses
+    client = None
+    model = None
+
+    def __init__(self) -> None:
+        if self.client is None:
+            raise NotImplementedError("Subclasses must set the 'client' class attribute")
+        if self.model is None:
+            try:
+                self.model = self.client.models.list().data[0].id
+            except Exception as e:
+                raise NotImplementedError(
+                    "Subclasses must set the 'model' class attribute or ensure the client can list models"
+                ) from e
+
+    def _create_completion_args(self, params: Dict[str, Any]) -> Dict[str, Any]:
+        """Create the completion arguments dictionary from parameters.
+
+        Args:
+            params: Dictionary of parameters extracted from request
+
+        Returns:
+            Dict containing the completion arguments
+        """
+        completion_args = {**params}
+        completion_args.update({"model": self.model})
+        stream = completion_args.pop("stream", False)
+        if stream:
+            # Force to use usage
+            stream_options = params.pop("stream_options", {})
+            stream_options.update({"include_usage": True})
+            completion_args["stream_options"] = stream_options
+            completion_args["stream"] = stream
+
+        return completion_args
+
+    def _set_usage(self, resp):
+        if resp.usage and resp.usage.prompt_tokens and resp.usage.completion_tokens:
+            self.set_output_context(
+                prompt_tokens=resp.usage.prompt_tokens,
+                completion_tokens=resp.usage.completion_tokens,
+            )
+
+    def _handle_chat_completions(self, request_data: Dict[str, Any]):
+        """Handle chat completion requests."""
+        completion_args = self._create_completion_args(request_data)
+        completion = self.client.chat.completions.create(**completion_args)
+        self._set_usage(completion)
+        return completion
+
+    def _handle_images_generate(self, request_data: Dict[str, Any]):
+        """Handle image generation requests."""
+        image_args = {**request_data}
+        image_args.update({"model": self.model})
+        response = self.client.images.generate(**image_args)
+        return response
+
+    def _handle_embeddings(self, request_data: Dict[str, Any]):
+        """Handle embedding requests."""
+        embedding_args = {**request_data}
+        embedding_args.update({"model": self.model})
+        response = self.client.embeddings.create(**embedding_args)
+        return response
+
+    def _handle_responses(self, request_data: Dict[str, Any]):
+        """Handle response requests."""
+        response_args = {**request_data}
+        response_args.update({"model": self.model})
+        response = self.client.responses.create(**response_args)
+        return response
+
+    def _route_request(self, endpoint: str, request_data: Dict[str, Any]):
+        """Route the request to appropriate handler based on endpoint."""
+        handlers = {
+            self.ENDPOINT_CHAT_COMPLETIONS: self._handle_chat_completions,
+            self.ENDPOINT_IMAGES_GENERATE: self._handle_images_generate,
+            self.ENDPOINT_EMBEDDINGS: self._handle_embeddings,
+            self.ENDPOINT_RESPONSES: self._handle_responses,
+        }
+
+        handler = handlers.get(endpoint)
+        if not handler:
+            raise ValueError(f"Unsupported endpoint: {endpoint}")
+
+        return handler(request_data)
+
+    @ModelClass.method
+    def openai_transport(self, msg: str) -> str:
+        """Process an OpenAI-compatible request and send it to the appropriate OpenAI endpoint.
+
+        Args:
+            msg: JSON string containing the request parameters including 'openai_endpoint'
+
+        Returns:
+            JSON string containing the response or error
+        """
+        try:
+            request_data = json.loads(msg)
+            endpoint = request_data.pop("openai_endpoint", self.DEFAULT_ENDPOINT)
+            response = self._route_request(endpoint, request_data)
+            return json.dumps(response.model_dump())
+        except Exception as e:
+            return f"Error: {e}"
+
+    @ModelClass.method
+    def openai_stream_transport(self, msg: str) -> Iterator[str]:
+        """Process an OpenAI-compatible request and return a streaming response iterator.
+        This method is used when stream=True and returns an iterator of strings directly,
+        without converting to a list or JSON serializing. Supports chat completions and responses endpoints.
+
+        Args:
+            msg: The request as a JSON string.
+
+        Returns:
+            Iterator[str]: An iterator yielding text chunks from the streaming response.
+        """
+        try:
+            request_data = json.loads(msg)
+            endpoint = request_data.pop("openai_endpoint", self.DEFAULT_ENDPOINT)
+            if endpoint not in [self.ENDPOINT_CHAT_COMPLETIONS, self.ENDPOINT_RESPONSES]:
+                raise ValueError("Streaming is only supported for chat completions and responses.")
+
+            if endpoint == self.ENDPOINT_RESPONSES:
+                # Handle responses endpoint
+                stream_response = self._route_request(endpoint, request_data)
+                for chunk in stream_response:
+                    yield json.dumps(chunk.model_dump())
+            else:
+                completion_args = self._create_completion_args(request_data)
+                stream_completion = self.client.chat.completions.create(**completion_args)
+                for chunk in stream_completion:
+                    self._set_usage(chunk)
+                    yield json.dumps(chunk.model_dump())
+
+        except Exception as e:
+            yield f"Error: {e}"
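Note: a hedged sketch of how the rewritten transport is meant to be driven. The OpenAI client, API key, and model name are placeholders, and it assumes the @ModelClass.method-decorated methods can be invoked directly in-process, as the dummy model's tests do.

import json

from openai import OpenAI  # any OpenAI-compatible client works here
from clarifai.runners.models.openai_class import OpenAIModelClass


class MyOpenAIModel(OpenAIModelClass):
    client = OpenAI(api_key="your-key")  # placeholder credentials
    model = "gpt-4"


model = MyOpenAIModel()

# Non-streaming: defaults to the /chat/completions endpoint.
resp = model.openai_transport(json.dumps({"messages": [{"role": "user", "content": "Hello"}]}))

# Routing: other endpoints are selected via the 'openai_endpoint' key.
emb = model.openai_transport(json.dumps({"openai_endpoint": "/embeddings", "input": "Hello"}))

# Streaming: chunks arrive as JSON strings; include_usage is forced on.
for chunk in model.openai_stream_transport(
    json.dumps({"messages": [{"role": "user", "content": "Hello"}], "stream": True})
):
    print(json.loads(chunk))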
{clarifai-11.4.9 → clarifai-11.5.0}/clarifai/runners/models/visual_classifier_class.py
@@ -52,7 +52,7 @@ class VisualClassifierClass(ModelClass):
 
     @staticmethod
     def process_concepts(
-        logits: torch.Tensor, threshold: float, model_labels: Dict[int, str]
+        logits: torch.Tensor, model_labels: Dict[int, str]
     ) -> List[List[Concept]]:
         """Convert model logits into a structured format of concepts.
 
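Note: after this change, callers pass only the logits and the label map; the threshold argument is gone from process_concepts. A minimal sketch of the new call shape (tensor values and labels are illustrative):

import torch

logits = torch.tensor([[0.92, 0.08]])         # one image, two classes
labels = {0: "cat", 1: "dog"}
concepts = VisualClassifierClass.process_concepts(logits, labels)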
{clarifai-11.4.9 → clarifai-11.5.0}/clarifai/runners/utils/code_script.py
@@ -111,7 +111,7 @@ model = Model.from_current_context()"""
     else:
         model_ui_url = url_helper.clarifai_url(user_id, app_id, "models", model_id)
         model_section = f"""
-model = Model({model_ui_url},
+model = Model("{model_ui_url}",
    deployment_id = {deployment_id},  # Only needed for dedicated deployed models
    {base_url_str}
)
@@ -133,6 +133,8 @@ model = Model({model_ui_url},
             continue
         if default_value is None and required:
             default_value = _set_default_value(param_type)
+        if param_type == "str" and default_value is not None:
+            default_value = json.dumps(default_value)
         client_script_str += f"{param_name}={default_value}, "
     client_script_str = client_script_str.rstrip(", ") + ")"
     if method_signature.method_type == resources_pb2.RunnerMethodType.UNARY_UNARY:
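Note: the new json.dumps guard quotes string defaults in the generated client script, so the emitted snippet stays valid Python. A toy illustration of the difference (the parameter name is illustrative):

import json

default_value = "hello"
# Without the guard the generated line would read: prompt=hello   -> NameError at runtime.
# With the guard it reads:                         prompt="hello"
print(f"prompt={json.dumps(default_value)}, ")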
{clarifai-11.4.9 → clarifai-11.5.0}/clarifai/runners/utils/data_utils.py
@@ -377,8 +377,14 @@ class Param(MessageData):
 
             if proto is None:
                 proto = ParamProto()
-            proto.default = json.dumps(default)
+
+            if isinstance(default, str):
+                proto.default = default
+            else:
+                proto.default = json.dumps(default)
+
             return proto
+
         except Exception:
             if default is not None:
                 proto.default = str(default)
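Note: the isinstance check keeps string defaults from being JSON-encoded, which would wrap them in a second layer of quotes. A standalone illustration of the asymmetry this avoids:

import json

print(json.dumps("hello"))    # '"hello"'  (double-quoted if stored as a string default)
print(json.dumps([1, 2, 3]))  # '[1, 2, 3]' (non-string defaults still need serializing)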
{clarifai-11.4.9 → clarifai-11.5.0/clarifai.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: clarifai
-Version: 11.4.9
+Version: 11.5.0
 Home-page: https://github.com/Clarifai/clarifai-python
 Author: Clarifai
 Author-email: support@clarifai.com
@@ -19,8 +19,8 @@ Classifier: Operating System :: OS Independent
 Requires-Python: >=3.8
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: clarifai-grpc>=11.3.4
-Requires-Dist: clarifai-protocol>=0.0.23
+Requires-Dist: clarifai-grpc>=11.5.5
+Requires-Dist: clarifai-protocol>=0.0.24
 Requires-Dist: numpy>=1.22.0
 Requires-Dist: tqdm>=4.65.0
 Requires-Dist: PyYAML>=6.0.1
{clarifai-11.4.9 → clarifai-11.5.0}/clarifai.egg-info/requires.txt
@@ -1,5 +1,5 @@
-clarifai-grpc>=11.3.4
-clarifai-protocol>=0.0.23
+clarifai-grpc>=11.5.5
+clarifai-protocol>=0.0.24
 numpy>=1.22.0
 tqdm>=4.65.0
 PyYAML>=6.0.1
{clarifai-11.4.9 → clarifai-11.5.0}/requirements.txt
@@ -1,5 +1,5 @@
-clarifai-grpc>=11.3.4
-clarifai-protocol>=0.0.23
+clarifai-grpc>=11.5.5
+clarifai-protocol>=0.0.24
 numpy>=1.22.0
 tqdm>=4.65.0
 PyYAML>=6.0.1
clarifai-11.4.9/clarifai/__init__.py
@@ -1 +0,0 @@
-__version__ = "11.4.9"
clarifai-11.4.9/clarifai/runners/models/openai_class.py
@@ -1,221 +0,0 @@
-"""Base class for creating OpenAI-compatible API server."""
-
-import json
-from typing import Any, Dict, Iterator
-
-from clarifai.runners.models.model_class import ModelClass
-
-
-class OpenAIModelClass(ModelClass):
-    """Base class for wrapping OpenAI-compatible servers as a model running in Clarifai.
-    This handles all the transport between the API and the OpenAI-compatible server.
-
-    To use this class, create a subclass and set the following class attributes:
-    - client: The OpenAI-compatible client instance
-    - model: The name of the model to use with the client
-
-    Example:
-        class MyOpenAIModel(OpenAIModelClass):
-            client = OpenAI(api_key="your-key")
-            model = "gpt-4"
-    """
-
-    # These should be overridden in subclasses
-    client = None
-    model = None
-
-    def __init__(self) -> None:
-        if self.client is None:
-            raise NotImplementedError("Subclasses must set the 'client' class attribute")
-        if self.model is None:
-            try:
-                self.model = self.client.models.list().data[0].id
-            except Exception as e:
-                raise NotImplementedError(
-                    "Subclasses must set the 'model' class attribute or ensure the client can list models"
-                ) from e
-
-    def _extract_request_params(self, request_data: Dict[str, Any]) -> Dict[str, Any]:
-        """Extract and validate common openai arguments parameters from the request data.
-
-        Args:
-            request_data: The parsed JSON request data
-
-        Returns:
-            Dict containing the extracted parameters
-        """
-        return {
-            "messages": request_data.get("messages", []),
-            "temperature": request_data.get("temperature", 1.0),
-            "max_tokens": request_data.get("max_tokens"),
-            "max_completion_tokens": request_data.get("max_completion_tokens"),
-            "n": request_data.get("n", 1),
-            "frequency_penalty": request_data.get("frequency_penalty"),
-            "presence_penalty": request_data.get("presence_penalty"),
-            "top_p": request_data.get("top_p", 1.0),
-            "reasoning_effort": request_data.get("reasoning_effort"),
-            "response_format": request_data.get("response_format"),
-            "stop": request_data.get("stop"),
-            "tools": request_data.get("tools"),
-            "tool_choice": request_data.get("tool_choice"),
-            "tool_resources": request_data.get("tool_resources"),
-            "modalities": request_data.get("modalities"),
-            "stream_options": request_data.get("stream_options", {"include_usage": True}),
-        }
-
-    def _create_completion_args(
-        self, params: Dict[str, Any], stream: bool = False
-    ) -> Dict[str, Any]:
-        """Create the completion arguments dictionary from parameters.
-
-        Args:
-            params: Dictionary of parameters extracted from request
-            stream: Whether this is a streaming request
-
-        Returns:
-            Dict containing the completion arguments
-        """
-        completion_args = {
-            "model": self.model,
-            "messages": params["messages"],
-            "temperature": params["temperature"],
-        }
-
-        if stream:
-            completion_args["stream"] = True
-            if params.get("stream_options"):
-                completion_args["stream_options"] = params["stream_options"]
-
-        # Add optional parameters if they exist
-        optional_params = [
-            "max_tokens",
-            "max_completion_tokens",
-            "n",
-            "frequency_penalty",
-            "presence_penalty",
-            "top_p",
-            "reasoning_effort",
-            "response_format",
-            "stop",
-            "tools",
-            "tool_choice",
-            "tool_resources",
-            "modalities",
-        ]
-
-        for param in optional_params:
-            if params.get(param) is not None:
-                completion_args[param] = params[param]
-
-        return completion_args
-
-    def _format_error_response(self, error: Exception) -> str:
-        """Format an error response in OpenAI-compatible format.
-
-        Args:
-            error: The exception that occurred
-
-        Returns:
-            JSON string containing the error response
-        """
-        error_response = {
-            "error": {
-                "message": str(error),
-                "type": "InvalidRequestError",
-                "code": "invalid_request_error",
-            }
-        }
-        return json.dumps(error_response)
-
-    @ModelClass.method
-    def openai_transport(self, msg: str) -> str:
-        """The single model method to get the OpenAI-compatible request and send it to the OpenAI server
-        then return its response.
-
-        Args:
-            msg: JSON string containing the request parameters
-
-        Returns:
-            JSON string containing the response or error
-        """
-        try:
-            request_data = json.loads(msg)
-            params = self._extract_request_params(request_data)
-            stream = request_data.get("stream", False)
-
-            if stream:
-                chunks = self._process_streaming_request(**params)
-                response_list = []
-                for chunk in chunks:
-                    response_list.append(chunk)
-                return json.dumps(response_list)
-            else:
-                completion = self._process_request(**params)
-                if completion.get('usage'):
-                    if completion['usage'].get('prompt_tokens') and completion['usage'].get(
-                        'completion_tokens'
-                    ):
-                        self.set_output_context(
-                            prompt_tokens=completion['usage']['prompt_tokens'],
-                            completion_tokens=completion['usage']['completion_tokens'],
-                        )
-
-                return json.dumps(completion)
-
-        except Exception as e:
-            return self._format_error_response(e)
-
-    @ModelClass.method
-    def openai_stream_transport(self, msg: str) -> Iterator[str]:
-        """Process an OpenAI-compatible request and return a streaming response iterator.
-        This method is used when stream=True and returns an iterator of strings directly,
-        without converting to a list or JSON serializing.
-
-        Args:
-            msg: The request as a JSON string.
-
-        Returns:
-            Iterator[str]: An iterator yielding text chunks from the streaming response.
-        """
-        try:
-            request_data = json.loads(msg)
-            params = self._extract_request_params(request_data)
-            for chunk in self._process_streaming_request(**params):
-                if chunk.get('usage'):
-                    if chunk['usage'].get('prompt_tokens') and chunk['usage'].get(
-                        'completion_tokens'
-                    ):
-                        self.set_output_context(
-                            prompt_tokens=chunk['usage']['prompt_tokens'],
-                            completion_tokens=chunk['usage']['completion_tokens'],
-                        )
-                yield json.dumps(chunk)
-        except Exception as e:
-            yield f"Error: {str(e)}"
-
-    def _process_request(self, **kwargs) -> Any:
-        """Process a standard (non-streaming) request using the OpenAI client.
-
-        Args:
-            **kwargs: Request parameters
-
-        Returns:
-            The completion response from the OpenAI client
-        """
-        completion_args = self._create_completion_args(kwargs)
-        return self.client.chat.completions.create(**completion_args).to_dict()
-
-    def _process_streaming_request(self, **kwargs) -> Iterator[str]:
-        """Process a streaming request using the OpenAI client.
-
-        Args:
-            **kwargs: Request parameters
-
-        Returns:
-            Iterator yielding response chunks
-        """
-        completion_args = self._create_completion_args(kwargs, stream=True)
-        completion_stream = self.client.chat.completions.create(**completion_args)
-
-        for chunk in completion_stream:
-            yield chunk.to_dict()