camel-ai 0.2.18__py3-none-any.whl → 0.2.20a0__py3-none-any.whl

This diff shows the changes between publicly released versions of the package, as they appear in the supported public registries. It is provided for informational purposes only.

Potentially problematic release: this version of camel-ai has been flagged as possibly problematic.

Files changed (34)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +27 -27
  3. camel/agents/multi_hop_generator_agent.py +35 -3
  4. camel/agents/programmed_agent_instruction.py +73 -18
  5. camel/configs/__init__.py +6 -0
  6. camel/configs/gemini_config.py +1 -1
  7. camel/configs/moonshot_config.py +63 -0
  8. camel/configs/sglang_config.py +4 -0
  9. camel/configs/siliconflow_config.py +91 -0
  10. camel/datagen/source2synth/__init__.py +31 -0
  11. camel/{synthetic_datagen → datagen}/source2synth/data_processor.py +194 -29
  12. camel/{synthetic_datagen → datagen}/source2synth/models.py +25 -0
  13. camel/{synthetic_datagen → datagen}/source2synth/user_data_processor_config.py +9 -8
  14. camel/datahubs/huggingface.py +3 -3
  15. camel/embeddings/__init__.py +2 -0
  16. camel/embeddings/jina_embedding.py +161 -0
  17. camel/messages/func_message.py +1 -1
  18. camel/models/__init__.py +2 -0
  19. camel/models/deepseek_model.py +29 -11
  20. camel/models/groq_model.py +0 -2
  21. camel/models/model_factory.py +6 -0
  22. camel/models/moonshot_model.py +138 -0
  23. camel/models/openai_model.py +1 -9
  24. camel/models/siliconflow_model.py +142 -0
  25. camel/toolkits/__init__.py +2 -0
  26. camel/toolkits/search_toolkit.py +17 -6
  27. camel/toolkits/semantic_scholar_toolkit.py +308 -0
  28. camel/types/enums.py +176 -15
  29. camel/types/unified_model_type.py +5 -0
  30. camel/utils/token_counting.py +1 -1
  31. {camel_ai-0.2.18.dist-info → camel_ai-0.2.20a0.dist-info}/METADATA +9 -3
  32. {camel_ai-0.2.18.dist-info → camel_ai-0.2.20a0.dist-info}/RECORD +34 -27
  33. {camel_ai-0.2.18.dist-info → camel_ai-0.2.20a0.dist-info}/LICENSE +0 -0
  34. {camel_ai-0.2.18.dist-info → camel_ai-0.2.20a0.dist-info}/WHEEL +0 -0
camel/toolkits/semantic_scholar_toolkit.py ADDED
@@ -0,0 +1,308 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import json
+from typing import List, Optional
+
+import requests
+
+from camel.toolkits import FunctionTool
+from camel.toolkits.base import BaseToolkit
+
+
+class SemanticScholarToolkit(BaseToolkit):
+    r"""A toolkit for interacting with the Semantic Scholar
+    API to fetch paper and author data.
+    """
+
+    def __init__(self):
+        r"""Initializes the SemanticScholarToolkit."""
+        self.base_url = "https://api.semanticscholar.org/graph/v1"
+
+    def fetch_paper_data_title(
+        self,
+        paper_title: str,
+        fields: Optional[List[str]] = None,
+    ) -> dict:
+        r"""Fetches a SINGLE paper from the Semantic Scholar
+        API based on a paper title.
+
+        Args:
+            paper_title (str): The title of the paper to fetch.
+            fields (Optional[List[str]], optional): The fields to include in
+                the response (default: :obj:`None`). If not provided defaults
+                to ["title", "abstract", "authors", "year", "citationCount",
+                "publicationTypes", "publicationDate", "openAccessPdf"].
+
+        Returns:
+            dict: The response data from the API or error information if the
+                request fails.
+        """
+        if fields is None:
+            fields = [
+                "title",
+                "abstract",
+                "authors",
+                "year",
+                "citationCount",
+                "publicationTypes",
+                "publicationDate",
+                "openAccessPdf",
+            ]
+
+        url = f"{self.base_url}/paper/search"
+        query_params = {"query": paper_title, "fields": ",".join(fields)}
+        try:
+            response = requests.get(url, params=query_params)
+            response.raise_for_status()
+            return response.json()
+        except requests.exceptions.RequestException as e:
+            return {
+                "error": f"Request failed: {e!s}",
+                "message": str(e),
+            }
+        except ValueError:
+            return {
+                "error": "Response is not valid JSON",
+                "message": response.text,
+            }
+
+    def fetch_paper_data_id(
+        self,
+        paper_id: str,
+        fields: Optional[List[str]] = None,
+    ) -> dict:
+        r"""Fetches a SINGLE paper from the Semantic Scholar
+        API based on a paper ID.
+
+        Args:
+            paper_id (str): The ID of the paper to fetch.
+            fields (Optional[List[str]], optional): The fields to include in
+                the response (default: :obj:`None`). If not provided defaults
+                to ["title", "abstract", "authors", "year", "citationCount",
+                "publicationTypes", "publicationDate", "openAccessPdf"].
+
+        Returns:
+            dict: The response data from the API or error information
+                if the request fails.
+        """
+        if fields is None:
+            fields = [
+                "title",
+                "abstract",
+                "authors",
+                "year",
+                "citationCount",
+                "publicationTypes",
+                "publicationDate",
+                "openAccessPdf",
+            ]
+
+        url = f"{self.base_url}/paper/{paper_id}"
+        query_params = {"fields": ",".join(fields)}
+        try:
+            response = requests.get(url, params=query_params)
+            response.raise_for_status()
+            return response.json()
+        except requests.exceptions.RequestException as e:
+            return {
+                "error": f"Request failed: {e!s}",
+                "message": str(e),
+            }
+        except ValueError:
+            return {
+                "error": "Response is not valid JSON",
+                "message": response.text,
+            }
+
+    def fetch_bulk_paper_data(
+        self,
+        query: str,
+        year: str = "2023-",
+        fields: Optional[List[str]] = None,
+    ) -> dict:
+        r"""Fetches MULTIPLE papers at once from the Semantic Scholar
+        API based on a related topic.
+
+        Args:
+            query (str): The text query to match against the paper's title and
+                abstract. For example, you can use the following operators and
+                techniques to construct your query: Example 1: ((cloud
+                computing) | virtualization) +security -privacy This will
+                match papers whose title or abstract contains "cloud" and
+                "computing", or contains the word "virtualization". The papers
+                must also include the term "security" but exclude papers that
+                contain the word "privacy".
+            year (str, optional): The year filter for papers (default:
+                :obj:`"2023-"`).
+            fields (Optional[List[str]], optional): The fields to include in
+                the response (default: :obj:`None`). If not provided defaults
+                to ["title", "url", "publicationTypes", "publicationDate",
+                "openAccessPdf"].
+
+        Returns:
+            dict: The response data from the API or error information if the
+                request fails.
+        """
+        if fields is None:
+            fields = [
+                "title",
+                "url",
+                "publicationTypes",
+                "publicationDate",
+                "openAccessPdf",
+            ]
+
+        url = f"{self.base_url}/paper/search/bulk"
+        query_params = {
+            "query": query,
+            "fields": ",".join(fields),
+            "year": year,
+        }
+        try:
+            response = requests.get(url, params=query_params)
+            response.raise_for_status()
+            return response.json()
+        except requests.exceptions.RequestException as e:
+            return {
+                "error": f"Request failed: {e!s}",
+                "message": str(e),
+            }
+        except ValueError:
+            return {
+                "error": "Response is not valid JSON",
+                "message": response.text,
+            }
+
+    def fetch_recommended_papers(
+        self,
+        positive_paper_ids: List[str],
+        negative_paper_ids: List[str],
+        fields: Optional[List[str]] = None,
+        limit: int = 500,
+        save_to_file: bool = False,
+    ) -> dict:
+        r"""Fetches recommended papers from the Semantic Scholar
+        API based on the positive and negative paper IDs.
+
+        Args:
+            positive_paper_ids (list): A list of paper IDs (as strings)
+                that are positively correlated to the recommendation.
+            negative_paper_ids (list): A list of paper IDs (as strings)
+                that are negatively correlated to the recommendation.
+            fields (Optional[List[str]], optional): The fields to include in
+                the response (default: :obj:`None`). If not provided defaults
+                to ["title", "url", "citationCount", "authors",
+                "publicationTypes", "publicationDate", "openAccessPdf"].
+            limit (int, optional): The maximum number of recommended papers to
+                return (default: :obj:`500`).
+            save_to_file (bool, optional): If True, saves the response data to
+                a file (default: :obj:`False`).
+
+        Returns:
+            dict: A dictionary containing recommended papers sorted by
+                citation count.
+        """
+        if fields is None:
+            fields = [
+                "title",
+                "url",
+                "citationCount",
+                "authors",
+                "publicationTypes",
+                "publicationDate",
+                "openAccessPdf",
+            ]
+
+        url = "https://api.semanticscholar.org/recommendations/v1/papers"
+        query_params = {"fields": ",".join(fields), "limit": str(limit)}
+        data = {
+            "positive_paper_ids": positive_paper_ids,
+            "negative_paper_ids": negative_paper_ids,
+        }
+        try:
+            response = requests.post(url, params=query_params, json=data)
+            response.raise_for_status()
+            papers = response.json()
+            if save_to_file:
+                with open('recommended_papers.json', 'w') as output:
+                    json.dump(papers, output)
+            return papers
+        except requests.exceptions.RequestException as e:
+            return {"error": str(e)}
+        except ValueError:
+            return {
+                "error": "Response is not valid JSON",
+                "message": response.text,
+            }
+
+    def fetch_author_data(
+        self,
+        ids: List[str],
+        fields: Optional[List[str]] = None,
+        save_to_file: bool = False,
+    ) -> dict:
+        r"""Fetches author information from the Semantic Scholar
+        API based on author IDs.
+
+        Args:
+            ids (list): A list of author IDs (as strings) to fetch
+                data for.
+            fields (Optional[List[str]], optional): The fields to include in
+                the response (default: :obj:`None`). If not provided defaults
+                to ["name", "url", "paperCount", "hIndex", "papers"].
+            save_to_file (bool, optional): Whether to save the results to a
+                file (default: :obj:`False`).
+
+        Returns:
+            dict: The response data from the API or error information if
+                the request fails.
+        """
+        if fields is None:
+            fields = ["name", "url", "paperCount", "hIndex", "papers"]
+
+        url = f"{self.base_url}/author/batch"
+        query_params = {"fields": ",".join(fields)}
+        data = {"ids": ids}
+        try:
+            response = requests.post(url, params=query_params, json=data)
+            response.raise_for_status()
+            response_data = response.json()
+            if save_to_file:
+                with open('author_information.json', 'w') as output:
+                    json.dump(response_data, output)
+            return response_data
+        except requests.exceptions.RequestException as e:
+            return {"error": str(e)}
+        except ValueError:
+            return {
+                "error": "Response is not valid JSON",
+                "message": response.text,
+            }
+
+    def get_tools(self) -> List[FunctionTool]:
+        r"""Returns a list of FunctionTool objects representing the
+        functions in the toolkit.
+
+        Returns:
+            List[FunctionTool]: A list of FunctionTool objects
+                representing the functions in the toolkit.
+        """
+        return [
+            FunctionTool(self.fetch_paper_data_title),
+            FunctionTool(self.fetch_paper_data_id),
+            FunctionTool(self.fetch_bulk_paper_data),
+            FunctionTool(self.fetch_recommended_papers),
+            FunctionTool(self.fetch_author_data),
+        ]
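
To illustrate the new toolkit, here is a minimal usage sketch. It assumes camel-ai 0.2.20a0 is installed, that `camel.toolkits` re-exports `SemanticScholarToolkit` (the two added lines in `camel/toolkits/__init__.py` suggest it does), and that the public Semantic Scholar API is reachable; the paper title is a placeholder.

```python
from camel.toolkits import SemanticScholarToolkit

toolkit = SemanticScholarToolkit()

# Search for a single paper by title; `fields` narrows the response.
result = toolkit.fetch_paper_data_title(
    "Attention Is All You Need",  # placeholder title
    fields=["title", "year", "citationCount"],
)
print(result)

# Wrap the five methods as FunctionTool objects, e.g. to hand to an agent.
tools = toolkit.get_tools()
print(len(tools))  # 5
```

Note that the methods return an error dict rather than raising, so callers (including tool-using agents) should check for an "error" key in the result.
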
camel/types/enums.py CHANGED
@@ -37,21 +37,29 @@ class ModelType(UnifiedModelType, Enum):
     O1 = "o1"
     O1_PREVIEW = "o1-preview"
     O1_MINI = "o1-mini"
+    O3_MINI = "o3-mini"
 
     GLM_4 = "glm-4"
-    GLM_4V = 'glm-4v'
+    GLM_4V = "glm-4v"
+    GLM_4V_FLASH = "glm-4v-flash"
+    GLM_4V_PLUS_0111 = "glm-4v-plus-0111"
+    GLM_4_PLUS = "glm-4-plus"
+    GLM_4_AIR = "glm-4-air"
+    GLM_4_AIR_0111 = "glm-4-air-0111"
+    GLM_4_AIRX = "glm-4-airx"
+    GLM_4_LONG = "glm-4-long"
+    GLM_4_FLASHX = "glm-4-flashx"
+    GLM_4_FLASH = "glm-4-flash"
+    GLM_ZERO_PREVIEW = "glm-zero-preview"
     GLM_3_TURBO = "glm-3-turbo"
 
     # Groq platform models
     GROQ_LLAMA_3_1_8B = "llama-3.1-8b-instant"
-    GROQ_LLAMA_3_1_70B = "llama-3.1-70b-versatile"
-    GROQ_LLAMA_3_1_405B = "llama-3.1-405b-reasoning"
     GROQ_LLAMA_3_3_70B = "llama-3.3-70b-versatile"
     GROQ_LLAMA_3_3_70B_PREVIEW = "llama-3.3-70b-specdec"
     GROQ_LLAMA_3_8B = "llama3-8b-8192"
     GROQ_LLAMA_3_70B = "llama3-70b-8192"
     GROQ_MIXTRAL_8_7B = "mixtral-8x7b-32768"
-    GROQ_GEMMA_7B_IT = "gemma-7b-it"
     GROQ_GEMMA_2_9B_IT = "gemma2-9b-it"
 
     # TogetherAI platform models support tool calling
@@ -67,6 +75,17 @@ class ModelType(UnifiedModelType, Enum):
     SAMBA_LLAMA_3_1_70B = "Meta-Llama-3.1-70B-Instruct"
     SAMBA_LLAMA_3_1_405B = "Meta-Llama-3.1-405B-Instruct"
 
+    # SGLang models support tool calling
+    SGLANG_LLAMA_3_1_8B = "meta-llama/Meta-Llama-3.1-8B-Instruct"
+    SGLANG_LLAMA_3_1_70B = "meta-llama/Meta-Llama-3.1-70B-Instruct"
+    SGLANG_LLAMA_3_1_405B = "meta-llama/Meta-Llama-3.1-405B-Instruct"
+    SGLANG_LLAMA_3_2_1B = "meta-llama/Llama-3.2-1B-Instruct"
+    SGLANG_MIXTRAL_NEMO = "mistralai/Mistral-Nemo-Instruct-2407"
+    SGLANG_MISTRAL_7B = "mistralai/Mistral-7B-Instruct-v0.3"
+    SGLANG_QWEN_2_5_7B = "Qwen/Qwen2.5-7B-Instruct"
+    SGLANG_QWEN_2_5_32B = "Qwen/Qwen2.5-32B-Instruct"
+    SGLANG_QWEN_2_5_72B = "Qwen/Qwen2.5-72B-Instruct"
+
     STUB = "stub"
 
     # Legacy anthropic models
@@ -97,9 +116,12 @@ class ModelType(UnifiedModelType, Enum):
     NVIDIA_LLAMA3_3_70B_INSTRUCT = "meta/llama-3.3-70b-instruct"
 
     # Gemini models
+    GEMINI_2_0_FLASH = "gemini-2.0-flash-exp"
+    GEMINI_2_0_FLASH_THINKING = "gemini-2.0-flash-thinking-exp"
+    GEMINI_2_0_PRO_EXP = "gemini-2.0-pro-exp-02-05"
+    GEMINI_2_0_FLASH_LITE_PREVIEW = "gemini-2.0-flash-lite-preview-02-05"
     GEMINI_1_5_FLASH = "gemini-1.5-flash"
     GEMINI_1_5_PRO = "gemini-1.5-pro"
-    GEMINI_EXP_1114 = "gemini-exp-1114"
 
     # Mistral AI models
     MISTRAL_3B = "ministral-3b-latest"
@@ -136,6 +158,7 @@ class ModelType(UnifiedModelType, Enum):
     QWEN_MATH_TURBO = "qwen-math-turbo"
     QWEN_CODER_TURBO = "qwen-coder-turbo"
     QWEN_2_5_CODER_32B = "qwen2.5-coder-32b-instruct"
+    QWEN_2_5_VL_72B = "qwen2.5-vl-72b-instruct"
     QWEN_2_5_72B = "qwen2.5-72b-instruct"
     QWEN_2_5_32B = "qwen2.5-32b-instruct"
     QWEN_2_5_14B = "qwen2.5-14b-instruct"
@@ -161,6 +184,25 @@ class ModelType(UnifiedModelType, Enum):
     INTERNLM2_5_LATEST = "internlm2.5-latest"
     INTERNLM2_PRO_CHAT = "internlm2-pro-chat"
 
+    # Moonshot models
+    MOONSHOT_V1_8K = "moonshot-v1-8k"
+    MOONSHOT_V1_32K = "moonshot-v1-32k"
+    MOONSHOT_V1_128K = "moonshot-v1-128k"
+
+    # SiliconFlow models support tool calling
+    SILICONFLOW_DEEPSEEK_V2_5 = "deepseek-ai/DeepSeek-V2.5"
+    SILICONFLOW_DEEPSEEK_V3 = "deepseek-ai/DeepSeek-V3"
+    SILICONFLOW_INTERN_LM2_5_20B_CHAT = "internlm/internlm2_5-20b-chat"
+    SILICONFLOW_INTERN_LM2_5_7B_CHAT = "internlm/internlm2_5-7b-chat"
+    SILICONFLOW_PRO_INTERN_LM2_5_7B_CHAT = "Pro/internlm/internlm2_5-7b-chat"
+    SILICONFLOW_QWEN2_5_72B_INSTRUCT = "Qwen/Qwen2.5-72B-Instruct"
+    SILICONFLOW_QWEN2_5_32B_INSTRUCT = "Qwen/Qwen2.5-32B-Instruct"
+    SILICONFLOW_QWEN2_5_14B_INSTRUCT = "Qwen/Qwen2.5-14B-Instruct"
+    SILICONFLOW_QWEN2_5_7B_INSTRUCT = "Qwen/Qwen2.5-7B-Instruct"
+    SILICONFLOW_PRO_QWEN2_5_7B_INSTRUCT = "Pro/Qwen/Qwen2.5-7B-Instruct"
+    SILICONFLOW_THUDM_GLM_4_9B_CHAT = "THUDM/glm-4-9b-chat"
+    SILICONFLOW_PRO_THUDM_GLM_4_9B_CHAT = "Pro/THUDM/glm-4-9b-chat"
+
     def __str__(self):
         return self.value
 
@@ -190,6 +232,10 @@ class ModelType(UnifiedModelType, Enum):
                 self.is_internlm,
                 self.is_together,
                 self.is_sambanova,
+                self.is_groq,
+                self.is_sglang,
+                self.is_moonshot,
+                self.is_siliconflow,
             ]
         )
 
@@ -205,6 +251,7 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.O1,
             ModelType.O1_PREVIEW,
             ModelType.O1_MINI,
+            ModelType.O3_MINI,
         }
 
     @property
@@ -227,6 +274,16 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.GLM_3_TURBO,
             ModelType.GLM_4,
             ModelType.GLM_4V,
+            ModelType.GLM_4V_FLASH,
+            ModelType.GLM_4V_PLUS_0111,
+            ModelType.GLM_4_PLUS,
+            ModelType.GLM_4_AIR,
+            ModelType.GLM_4_AIR_0111,
+            ModelType.GLM_4_AIRX,
+            ModelType.GLM_4_LONG,
+            ModelType.GLM_4_FLASHX,
+            ModelType.GLM_4_FLASH,
+            ModelType.GLM_ZERO_PREVIEW,
         }
 
     @property
@@ -252,14 +309,11 @@ class ModelType(UnifiedModelType, Enum):
        r"""Returns whether this type of models is served by Groq."""
        return self in {
            ModelType.GROQ_LLAMA_3_1_8B,
-           ModelType.GROQ_LLAMA_3_1_70B,
-           ModelType.GROQ_LLAMA_3_1_405B,
            ModelType.GROQ_LLAMA_3_3_70B,
            ModelType.GROQ_LLAMA_3_3_70B_PREVIEW,
            ModelType.GROQ_LLAMA_3_8B,
            ModelType.GROQ_LLAMA_3_70B,
            ModelType.GROQ_MIXTRAL_8_7B,
-           ModelType.GROQ_GEMMA_7B_IT,
            ModelType.GROQ_GEMMA_2_9B_IT,
        }
 
@@ -277,7 +331,7 @@ class ModelType(UnifiedModelType, Enum):
 
     @property
     def is_sambanova(self) -> bool:
-        r"""Returns whether this type of models is served by SambaNova AI."""
+        r"""Returns whether this type of model is served by SambaNova AI."""
         return self in {
             ModelType.SAMBA_LLAMA_3_1_8B,
             ModelType.SAMBA_LLAMA_3_1_70B,
@@ -326,9 +380,12 @@ class ModelType(UnifiedModelType, Enum):
             bool: Whether this type of models is gemini.
         """
         return self in {
+            ModelType.GEMINI_2_0_FLASH,
             ModelType.GEMINI_1_5_FLASH,
             ModelType.GEMINI_1_5_PRO,
-            ModelType.GEMINI_EXP_1114,
+            ModelType.GEMINI_2_0_FLASH_THINKING,
+            ModelType.GEMINI_2_0_PRO_EXP,
+            ModelType.GEMINI_2_0_FLASH_LITE_PREVIEW,
         }
 
     @property
@@ -391,6 +448,7 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.QWEN_MATH_TURBO,
             ModelType.QWEN_CODER_TURBO,
             ModelType.QWEN_2_5_CODER_32B,
+            ModelType.QWEN_2_5_VL_72B,
             ModelType.QWEN_2_5_72B,
             ModelType.QWEN_2_5_32B,
             ModelType.QWEN_2_5_14B,
@@ -413,6 +471,45 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.INTERNLM2_PRO_CHAT,
         }
 
+    @property
+    def is_moonshot(self) -> bool:
+        return self in {
+            ModelType.MOONSHOT_V1_8K,
+            ModelType.MOONSHOT_V1_32K,
+            ModelType.MOONSHOT_V1_128K,
+        }
+
+    @property
+    def is_sglang(self) -> bool:
+        return self in {
+            ModelType.SGLANG_LLAMA_3_1_8B,
+            ModelType.SGLANG_LLAMA_3_1_70B,
+            ModelType.SGLANG_LLAMA_3_1_405B,
+            ModelType.SGLANG_LLAMA_3_2_1B,
+            ModelType.SGLANG_MIXTRAL_NEMO,
+            ModelType.SGLANG_MISTRAL_7B,
+            ModelType.SGLANG_QWEN_2_5_7B,
+            ModelType.SGLANG_QWEN_2_5_32B,
+            ModelType.SGLANG_QWEN_2_5_72B,
+        }
+
+    @property
+    def is_siliconflow(self) -> bool:
+        return self in {
+            ModelType.SILICONFLOW_DEEPSEEK_V2_5,
+            ModelType.SILICONFLOW_DEEPSEEK_V3,
+            ModelType.SILICONFLOW_INTERN_LM2_5_20B_CHAT,
+            ModelType.SILICONFLOW_INTERN_LM2_5_7B_CHAT,
+            ModelType.SILICONFLOW_PRO_INTERN_LM2_5_7B_CHAT,
+            ModelType.SILICONFLOW_QWEN2_5_72B_INSTRUCT,
+            ModelType.SILICONFLOW_QWEN2_5_32B_INSTRUCT,
+            ModelType.SILICONFLOW_QWEN2_5_14B_INSTRUCT,
+            ModelType.SILICONFLOW_QWEN2_5_7B_INSTRUCT,
+            ModelType.SILICONFLOW_PRO_QWEN2_5_7B_INSTRUCT,
+            ModelType.SILICONFLOW_THUDM_GLM_4_9B_CHAT,
+            ModelType.SILICONFLOW_PRO_THUDM_GLM_4_9B_CHAT,
+        }
+
     @property
     def token_limit(self) -> int:
         r"""Returns the maximum token limit for a given model.
@@ -440,13 +537,15 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.GROQ_LLAMA_3_8B,
             ModelType.GROQ_LLAMA_3_70B,
             ModelType.GROQ_LLAMA_3_3_70B_PREVIEW,
-            ModelType.GROQ_GEMMA_7B_IT,
             ModelType.GROQ_GEMMA_2_9B_IT,
             ModelType.GLM_3_TURBO,
             ModelType.GLM_4,
             ModelType.QWEN_VL_PLUS,
             ModelType.NVIDIA_LLAMA3_70B,
             ModelType.TOGETHER_MISTRAL_7B,
+            ModelType.MOONSHOT_V1_8K,
+            ModelType.GLM_4V_FLASH,
+            ModelType.GLM_4_AIRX,
         }:
             return 8_192
         elif self in {
@@ -459,6 +558,8 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.YI_LARGE_RAG,
             ModelType.SAMBA_LLAMA_3_1_8B,
             ModelType.SAMBA_LLAMA_3_1_405B,
+            ModelType.GLM_4V_PLUS_0111,
+            ModelType.GLM_ZERO_PREVIEW,
         }:
             return 16_384
         elif self in {
@@ -479,6 +580,8 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.INTERNLM2_5_LATEST,
             ModelType.INTERNLM2_PRO_CHAT,
             ModelType.TOGETHER_MIXTRAL_8_7B,
+            ModelType.SGLANG_MISTRAL_7B,
+            ModelType.MOONSHOT_V1_32K,
         }:
             return 32_768
         elif self in {
@@ -504,6 +607,7 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.MISTRAL_8B,
             ModelType.MISTRAL_3B,
             ModelType.QWEN_2_5_CODER_32B,
+            ModelType.QWEN_2_5_VL_72B,
             ModelType.QWEN_2_5_72B,
             ModelType.QWEN_2_5_32B,
             ModelType.QWEN_2_5_14B,
@@ -518,12 +622,21 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.NVIDIA_LLAMA3_3_70B_INSTRUCT,
             ModelType.GROQ_LLAMA_3_3_70B,
             ModelType.SAMBA_LLAMA_3_1_70B,
+            ModelType.SGLANG_LLAMA_3_1_8B,
+            ModelType.SGLANG_LLAMA_3_1_70B,
+            ModelType.SGLANG_LLAMA_3_1_405B,
+            ModelType.SGLANG_LLAMA_3_2_1B,
+            ModelType.SGLANG_MIXTRAL_NEMO,
+            ModelType.MOONSHOT_V1_128K,
+            ModelType.GLM_4_PLUS,
+            ModelType.GLM_4_AIR,
+            ModelType.GLM_4_AIR_0111,
+            ModelType.GLM_4_FLASHX,
+            ModelType.GLM_4_FLASH,
         }:
             return 128_000
         elif self in {
             ModelType.GROQ_LLAMA_3_1_8B,
-            ModelType.GROQ_LLAMA_3_1_70B,
-            ModelType.GROQ_LLAMA_3_1_405B,
             ModelType.QWEN_PLUS,
             ModelType.QWEN_TURBO,
             ModelType.QWEN_CODER_TURBO,
@@ -531,10 +644,14 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.TOGETHER_LLAMA_3_1_70B,
             ModelType.TOGETHER_LLAMA_3_1_405B,
             ModelType.TOGETHER_LLAMA_3_3_70B,
+            ModelType.SGLANG_QWEN_2_5_7B,
+            ModelType.SGLANG_QWEN_2_5_32B,
+            ModelType.SGLANG_QWEN_2_5_72B,
         }:
             return 131_072
         elif self in {
             ModelType.O1,
+            ModelType.O3_MINI,
             ModelType.CLAUDE_2_1,
             ModelType.CLAUDE_3_OPUS,
             ModelType.CLAUDE_3_SONNET,
@@ -549,9 +666,13 @@ class ModelType(UnifiedModelType, Enum):
         }:
             return 256_000
         elif self in {
+            ModelType.GEMINI_2_0_FLASH,
             ModelType.GEMINI_1_5_FLASH,
             ModelType.GEMINI_1_5_PRO,
-            ModelType.GEMINI_EXP_1114, # Not given in docs, assuming the same
+            ModelType.GEMINI_2_0_FLASH_THINKING,
+            ModelType.GEMINI_2_0_FLASH_LITE_PREVIEW,
+            ModelType.GEMINI_2_0_PRO_EXP, # Not given in doc, assume the same
+            ModelType.GLM_4_LONG,
         }:
             return 1_048_576
         elif self in {
@@ -567,6 +688,11 @@ class EmbeddingModelType(Enum):
     TEXT_EMBEDDING_3_SMALL = "text-embedding-3-small"
     TEXT_EMBEDDING_3_LARGE = "text-embedding-3-large"
 
+    JINA_EMBEDDINGS_V3 = "jina-embeddings-v3"
+    JINA_CLIP_V2 = "jina-clip-v2"
+    JINA_COLBERT_V2 = "jina-colbert-v2"
+    JINA_EMBEDDINGS_V2_BASE_CODE = "jina-embeddings-v2-base-code"
+
     MISTRAL_EMBED = "mistral-embed"
 
     @property
@@ -578,6 +704,16 @@ class EmbeddingModelType(Enum):
             EmbeddingModelType.TEXT_EMBEDDING_3_LARGE,
         }
 
+    @property
+    def is_jina(self) -> bool:
+        r"""Returns whether this type of models is an Jina model."""
+        return self in {
+            EmbeddingModelType.JINA_EMBEDDINGS_V3,
+            EmbeddingModelType.JINA_CLIP_V2,
+            EmbeddingModelType.JINA_COLBERT_V2,
+            EmbeddingModelType.JINA_EMBEDDINGS_V2_BASE_CODE,
+        }
+
     @property
     def is_mistral(self) -> bool:
         r"""Returns whether this type of models is an Mistral-released
@@ -589,7 +725,20 @@ class EmbeddingModelType(Enum):
 
     @property
     def output_dim(self) -> int:
-        if self is EmbeddingModelType.TEXT_EMBEDDING_ADA_2:
+        if self in {
+            EmbeddingModelType.JINA_COLBERT_V2,
+        }:
+            return 128
+        elif self in {
+            EmbeddingModelType.JINA_EMBEDDINGS_V2_BASE_CODE,
+        }:
+            return 768
+        elif self in {
+            EmbeddingModelType.JINA_EMBEDDINGS_V3,
+            EmbeddingModelType.JINA_CLIP_V2,
+        }:
+            return 1024
+        elif self is EmbeddingModelType.TEXT_EMBEDDING_ADA_2:
            return 1536
        elif self is EmbeddingModelType.TEXT_EMBEDDING_3_SMALL:
            return 1536
@@ -709,6 +858,8 @@ class ModelPlatformType(Enum):
     DEEPSEEK = "deepseek"
     SGLANG = "sglang"
     INTERNLM = "internlm"
+    MOONSHOT = "moonshot"
+    SILICONFLOW = "siliconflow"
 
     @property
     def is_openai(self) -> bool:
@@ -816,6 +967,16 @@ class ModelPlatformType(Enum):
         r"""Returns whether this platform is InternLM."""
         return self is ModelPlatformType.INTERNLM
 
+    @property
+    def is_moonshot(self) -> bool:
+        r"""Returns whether this platform is Moonshot model."""
+        return self is ModelPlatformType.MOONSHOT
+
+    @property
+    def is_siliconflow(self) -> bool:
+        r"""Returns whether this platform is SiliconFlow."""
+        return self is ModelPlatformType.SILICONFLOW
+
 
 class AudioModelType(Enum):
     TTS_1 = "tts-1"
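
The enum changes are easiest to read through their public helpers. The sketch below is derived directly from the branches visible in the diff above; it assumes `ModelType`, `EmbeddingModelType`, and `ModelPlatformType` remain importable from `camel.types`, as in previous releases.

```python
from camel.types import EmbeddingModelType, ModelPlatformType, ModelType

# New Moonshot entries and their context windows, per token_limit above.
assert ModelType.MOONSHOT_V1_8K.is_moonshot
assert ModelType.MOONSHOT_V1_8K.token_limit == 8_192
assert ModelType.MOONSHOT_V1_32K.token_limit == 32_768
assert ModelType.MOONSHOT_V1_128K.token_limit == 128_000

# SGLang and SiliconFlow models are covered by the new properties.
assert ModelType.SGLANG_QWEN_2_5_7B.is_sglang
assert ModelType.SGLANG_QWEN_2_5_7B.token_limit == 131_072
assert ModelType.SILICONFLOW_DEEPSEEK_V3.is_siliconflow

# Jina embedding models report the dimensions added to output_dim.
assert EmbeddingModelType.JINA_COLBERT_V2.output_dim == 128
assert EmbeddingModelType.JINA_EMBEDDINGS_V2_BASE_CODE.output_dim == 768
assert EmbeddingModelType.JINA_EMBEDDINGS_V3.output_dim == 1024

# Matching platform members.
assert ModelPlatformType.MOONSHOT.is_moonshot
assert ModelPlatformType.SILICONFLOW.is_siliconflow
```

One design note: `support_native_tool_calling` now also consults `is_groq`, `is_sglang`, `is_moonshot`, and `is_siliconflow`, so models from these platforms advertise native tool calling, which is consistent with the "support tool calling" comments on the new enum blocks.
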