camel-ai 0.2.18__py3-none-any.whl → 0.2.20__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +29 -30
- camel/agents/knowledge_graph_agent.py +1 -5
- camel/agents/multi_hop_generator_agent.py +35 -3
- camel/agents/programmed_agent_instruction.py +73 -18
- camel/benchmarks/apibench.py +1 -5
- camel/benchmarks/nexus.py +1 -5
- camel/benchmarks/ragbench.py +2 -2
- camel/bots/telegram_bot.py +1 -5
- camel/configs/__init__.py +9 -0
- camel/configs/aiml_config.py +80 -0
- camel/configs/gemini_config.py +1 -1
- camel/configs/moonshot_config.py +63 -0
- camel/configs/sglang_config.py +4 -0
- camel/configs/siliconflow_config.py +91 -0
- camel/datagen/__init__.py +3 -1
- camel/datagen/self_improving_cot.py +821 -0
- camel/datagen/source2synth/__init__.py +31 -0
- camel/{synthetic_datagen → datagen}/source2synth/data_processor.py +194 -29
- camel/{synthetic_datagen → datagen}/source2synth/models.py +25 -0
- camel/{synthetic_datagen → datagen}/source2synth/user_data_processor_config.py +9 -8
- camel/datahubs/huggingface.py +3 -3
- camel/embeddings/__init__.py +2 -0
- camel/embeddings/jina_embedding.py +161 -0
- camel/messages/func_message.py +1 -1
- camel/models/__init__.py +4 -0
- camel/models/aiml_model.py +147 -0
- camel/models/deepseek_model.py +29 -11
- camel/models/groq_model.py +0 -2
- camel/models/model_factory.py +9 -0
- camel/models/moonshot_model.py +138 -0
- camel/models/openai_model.py +1 -9
- camel/models/siliconflow_model.py +142 -0
- camel/societies/workforce/role_playing_worker.py +2 -4
- camel/societies/workforce/single_agent_worker.py +1 -6
- camel/societies/workforce/workforce.py +3 -9
- camel/toolkits/__init__.py +4 -0
- camel/toolkits/reddit_toolkit.py +8 -38
- camel/toolkits/search_toolkit.py +17 -6
- camel/toolkits/semantic_scholar_toolkit.py +308 -0
- camel/toolkits/sympy_toolkit.py +778 -0
- camel/toolkits/whatsapp_toolkit.py +11 -32
- camel/types/enums.py +205 -16
- camel/types/unified_model_type.py +5 -0
- camel/utils/__init__.py +7 -2
- camel/utils/commons.py +198 -21
- camel/utils/deduplication.py +199 -0
- camel/utils/token_counting.py +1 -39
- {camel_ai-0.2.18.dist-info → camel_ai-0.2.20.dist-info}/METADATA +17 -12
- {camel_ai-0.2.18.dist-info → camel_ai-0.2.20.dist-info}/RECORD +53 -41
- /camel/datagen/{cotdatagen.py → cot_datagen.py} +0 -0
- {camel_ai-0.2.18.dist-info → camel_ai-0.2.20.dist-info}/LICENSE +0 -0
- {camel_ai-0.2.18.dist-info → camel_ai-0.2.20.dist-info}/WHEEL +0 -0
camel/toolkits/whatsapp_toolkit.py
CHANGED

@@ -19,7 +19,7 @@ import requests

 from camel.toolkits import FunctionTool
 from camel.toolkits.base import BaseToolkit
-from camel.utils
+from camel.utils import retry_on_error


 class WhatsAppToolkit(BaseToolkit):

@@ -36,18 +36,8 @@ class WhatsAppToolkit(BaseToolkit):
         version (str): API version.
     """

-    def __init__(self
-        r"""Initializes the WhatsAppToolkit
-        retries and delay.
-
-        Args:
-            retries (int): Number of times to retry the request in case of
-                failure. (default: :obj:`3`)
-            delay (int): Time in seconds to wait between retries.
-                (default: :obj:`1`)
-        """
-        self.retries = retries
-        self.delay = delay
+    def __init__(self):
+        r"""Initializes the WhatsAppToolkit."""
         self.base_url = "https://graph.facebook.com"
         self.version = "v17.0"

@@ -61,6 +51,7 @@ class WhatsAppToolkit(BaseToolkit):
                 "WHATSAPP_PHONE_NUMBER_ID environment variables."
             )

+    @retry_on_error()
     def send_message(
         self, to: str, message: str
     ) -> Union[Dict[str, Any], str]:

@@ -88,19 +79,15 @@
         }

         try:
-            response =
-                requests.post,
-                retries=self.retries,
-                delay=self.delay,
-                url=url,
-                headers=headers,
-                json=data,
-            )
+            response = requests.post(url=url, headers=headers, json=data)
             response.raise_for_status()
             return response.json()
+        except requests.exceptions.RequestException as e:
+            raise e
         except Exception as e:
             return f"Failed to send message: {e!s}"

+    @retry_on_error()
     def get_message_templates(self) -> Union[List[Dict[str, Any]], str]:
         r"""Retrieves all message templates for the WhatsApp Business account.

@@ -116,18 +103,13 @@
         headers = {"Authorization": f"Bearer {self.access_token}"}

         try:
-            response =
-                requests.get,
-                retries=self.retries,
-                delay=self.delay,
-                url=url,
-                headers=headers,
-            )
+            response = requests.get(url=url, headers=headers)
             response.raise_for_status()
             return response.json().get("data", [])
         except Exception as e:
             return f"Failed to retrieve message templates: {e!s}"

+    @retry_on_error()
     def get_business_profile(self) -> Union[Dict[str, Any], str]:
         r"""Retrieves the WhatsApp Business profile information.

@@ -149,10 +131,7 @@
         }

         try:
-            response =
-                requests.get,
-                retries=self.retries,
-                delay=self.delay,
+            response = requests.get(
                 url=url,
                 headers=headers,
                 params=params,
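The net effect of this change is that retry behavior moves out of the toolkit's constructor (the old `retries`/`delay` attributes) and into a reusable `@retry_on_error()` decorator imported from `camel.utils`. The decorator's actual implementation lives in `camel/utils/commons.py`, which this diff does not show; below is only a minimal sketch of the shape such a decorator could take, with `max_retries` and `initial_delay` as assumed parameter names:

```python
import functools
import time
from typing import Any, Callable


def retry_on_error(
    max_retries: int = 3, initial_delay: float = 1.0
) -> Callable:
    r"""Sketch of a retry decorator: re-invoke the wrapped function on
    any exception, doubling the wait between attempts (assumed behavior,
    not the actual camel.utils implementation)."""

    def decorator(func: Callable) -> Callable:
        @functools.wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            delay = initial_delay
            for attempt in range(max_retries + 1):
                try:
                    return func(*args, **kwargs)
                except Exception:
                    if attempt == max_retries:
                        raise  # out of retries: propagate the error
                    time.sleep(delay)
                    delay *= 2

        return wrapper

    return decorator
```

Note how `send_message` now re-raises `requests.exceptions.RequestException` before the generic `except Exception` branch: re-raising is what lets a decorator like this observe the network failure and retry, while non-network errors still fall through to the string-returning error path.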
camel/types/enums.py
CHANGED

@@ -37,21 +37,29 @@ class ModelType(UnifiedModelType, Enum):
     O1 = "o1"
     O1_PREVIEW = "o1-preview"
     O1_MINI = "o1-mini"
+    O3_MINI = "o3-mini"

     GLM_4 = "glm-4"
-    GLM_4V =
+    GLM_4V = "glm-4v"
+    GLM_4V_FLASH = "glm-4v-flash"
+    GLM_4V_PLUS_0111 = "glm-4v-plus-0111"
+    GLM_4_PLUS = "glm-4-plus"
+    GLM_4_AIR = "glm-4-air"
+    GLM_4_AIR_0111 = "glm-4-air-0111"
+    GLM_4_AIRX = "glm-4-airx"
+    GLM_4_LONG = "glm-4-long"
+    GLM_4_FLASHX = "glm-4-flashx"
+    GLM_4_FLASH = "glm-4-flash"
+    GLM_ZERO_PREVIEW = "glm-zero-preview"
     GLM_3_TURBO = "glm-3-turbo"

     # Groq platform models
     GROQ_LLAMA_3_1_8B = "llama-3.1-8b-instant"
-    GROQ_LLAMA_3_1_70B = "llama-3.1-70b-versatile"
-    GROQ_LLAMA_3_1_405B = "llama-3.1-405b-reasoning"
     GROQ_LLAMA_3_3_70B = "llama-3.3-70b-versatile"
     GROQ_LLAMA_3_3_70B_PREVIEW = "llama-3.3-70b-specdec"
     GROQ_LLAMA_3_8B = "llama3-8b-8192"
     GROQ_LLAMA_3_70B = "llama3-70b-8192"
     GROQ_MIXTRAL_8_7B = "mixtral-8x7b-32768"
-    GROQ_GEMMA_7B_IT = "gemma-7b-it"
     GROQ_GEMMA_2_9B_IT = "gemma2-9b-it"

     # TogetherAI platform models support tool calling

@@ -67,6 +75,17 @@ class ModelType(UnifiedModelType, Enum):
     SAMBA_LLAMA_3_1_70B = "Meta-Llama-3.1-70B-Instruct"
     SAMBA_LLAMA_3_1_405B = "Meta-Llama-3.1-405B-Instruct"

+    # SGLang models support tool calling
+    SGLANG_LLAMA_3_1_8B = "meta-llama/Meta-Llama-3.1-8B-Instruct"
+    SGLANG_LLAMA_3_1_70B = "meta-llama/Meta-Llama-3.1-70B-Instruct"
+    SGLANG_LLAMA_3_1_405B = "meta-llama/Meta-Llama-3.1-405B-Instruct"
+    SGLANG_LLAMA_3_2_1B = "meta-llama/Llama-3.2-1B-Instruct"
+    SGLANG_MIXTRAL_NEMO = "mistralai/Mistral-Nemo-Instruct-2407"
+    SGLANG_MISTRAL_7B = "mistralai/Mistral-7B-Instruct-v0.3"
+    SGLANG_QWEN_2_5_7B = "Qwen/Qwen2.5-7B-Instruct"
+    SGLANG_QWEN_2_5_32B = "Qwen/Qwen2.5-32B-Instruct"
+    SGLANG_QWEN_2_5_72B = "Qwen/Qwen2.5-72B-Instruct"
+
     STUB = "stub"

     # Legacy anthropic models

@@ -97,9 +116,12 @@ class ModelType(UnifiedModelType, Enum):
     NVIDIA_LLAMA3_3_70B_INSTRUCT = "meta/llama-3.3-70b-instruct"

     # Gemini models
+    GEMINI_2_0_FLASH = "gemini-2.0-flash-exp"
+    GEMINI_2_0_FLASH_THINKING = "gemini-2.0-flash-thinking-exp"
+    GEMINI_2_0_PRO_EXP = "gemini-2.0-pro-exp-02-05"
+    GEMINI_2_0_FLASH_LITE_PREVIEW = "gemini-2.0-flash-lite-preview-02-05"
     GEMINI_1_5_FLASH = "gemini-1.5-flash"
     GEMINI_1_5_PRO = "gemini-1.5-pro"
-    GEMINI_EXP_1114 = "gemini-exp-1114"

     # Mistral AI models
     MISTRAL_3B = "ministral-3b-latest"

@@ -136,10 +158,12 @@ class ModelType(UnifiedModelType, Enum):
     QWEN_MATH_TURBO = "qwen-math-turbo"
     QWEN_CODER_TURBO = "qwen-coder-turbo"
     QWEN_2_5_CODER_32B = "qwen2.5-coder-32b-instruct"
+    QWEN_2_5_VL_72B = "qwen2.5-vl-72b-instruct"
     QWEN_2_5_72B = "qwen2.5-72b-instruct"
     QWEN_2_5_32B = "qwen2.5-32b-instruct"
     QWEN_2_5_14B = "qwen2.5-14b-instruct"
     QWEN_QWQ_32B = "qwq-32b-preview"
+    QWEN_QVQ_72B = "qvq-72b-preview"

     # Yi models (01-ai)
     YI_LIGHTNING = "yi-lightning"

@@ -161,6 +185,29 @@ class ModelType(UnifiedModelType, Enum):
     INTERNLM2_5_LATEST = "internlm2.5-latest"
     INTERNLM2_PRO_CHAT = "internlm2-pro-chat"

+    # Moonshot models
+    MOONSHOT_V1_8K = "moonshot-v1-8k"
+    MOONSHOT_V1_32K = "moonshot-v1-32k"
+    MOONSHOT_V1_128K = "moonshot-v1-128k"
+
+    # SiliconFlow models support tool calling
+    SILICONFLOW_DEEPSEEK_V2_5 = "deepseek-ai/DeepSeek-V2.5"
+    SILICONFLOW_DEEPSEEK_V3 = "deepseek-ai/DeepSeek-V3"
+    SILICONFLOW_INTERN_LM2_5_20B_CHAT = "internlm/internlm2_5-20b-chat"
+    SILICONFLOW_INTERN_LM2_5_7B_CHAT = "internlm/internlm2_5-7b-chat"
+    SILICONFLOW_PRO_INTERN_LM2_5_7B_CHAT = "Pro/internlm/internlm2_5-7b-chat"
+    SILICONFLOW_QWEN2_5_72B_INSTRUCT = "Qwen/Qwen2.5-72B-Instruct"
+    SILICONFLOW_QWEN2_5_32B_INSTRUCT = "Qwen/Qwen2.5-32B-Instruct"
+    SILICONFLOW_QWEN2_5_14B_INSTRUCT = "Qwen/Qwen2.5-14B-Instruct"
+    SILICONFLOW_QWEN2_5_7B_INSTRUCT = "Qwen/Qwen2.5-7B-Instruct"
+    SILICONFLOW_PRO_QWEN2_5_7B_INSTRUCT = "Pro/Qwen/Qwen2.5-7B-Instruct"
+    SILICONFLOW_THUDM_GLM_4_9B_CHAT = "THUDM/glm-4-9b-chat"
+    SILICONFLOW_PRO_THUDM_GLM_4_9B_CHAT = "Pro/THUDM/glm-4-9b-chat"
+
+    # AIML models support tool calling
+    AIML_MIXTRAL_8X7B = "mistralai/Mixtral-8x7B-Instruct-v0.1"
+    AIML_MISTRAL_7B_INSTRUCT = "mistralai/Mistral-7B-Instruct-v0.1"
+
     def __str__(self):
         return self.value

@@ -175,7 +222,11 @@ class ModelType(UnifiedModelType, Enum):

     @property
     def support_native_structured_output(self) -> bool:
-        return
+        return any(
+            [
+                self.is_openai,
+            ]
+        )

     @property
     def support_native_tool_calling(self) -> bool:

@@ -190,6 +241,12 @@ class ModelType(UnifiedModelType, Enum):
                 self.is_internlm,
                 self.is_together,
                 self.is_sambanova,
+                self.is_groq,
+                self.is_sglang,
+                self.is_moonshot,
+                self.is_siliconflow,
+                self.is_zhipuai,
+                self.is_aiml,
             ]
         )

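With the enlarged `any([...])` lists, capability checks on an enum member now cover the new providers. A quick illustrative check, assuming camel-ai 0.2.20 is installed and `ModelType` is re-exported from `camel.types` as elsewhere in the package:

```python
from camel.types import ModelType

# Tool calling is now advertised for Groq, SGLang, Moonshot,
# SiliconFlow, ZhipuAI, and AIML models in addition to the old set.
print(ModelType.MOONSHOT_V1_8K.support_native_tool_calling)  # True
print(ModelType.GLM_4.support_native_tool_calling)           # True (ZhipuAI)

# Native structured output remains OpenAI-only in this release.
print(ModelType.MOONSHOT_V1_8K.support_native_structured_output)  # False
```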
@@ -205,6 +262,7 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.O1,
             ModelType.O1_PREVIEW,
             ModelType.O1_MINI,
+            ModelType.O3_MINI,
         }

     @property

@@ -227,6 +285,16 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.GLM_3_TURBO,
             ModelType.GLM_4,
             ModelType.GLM_4V,
+            ModelType.GLM_4V_FLASH,
+            ModelType.GLM_4V_PLUS_0111,
+            ModelType.GLM_4_PLUS,
+            ModelType.GLM_4_AIR,
+            ModelType.GLM_4_AIR_0111,
+            ModelType.GLM_4_AIRX,
+            ModelType.GLM_4_LONG,
+            ModelType.GLM_4_FLASHX,
+            ModelType.GLM_4_FLASH,
+            ModelType.GLM_ZERO_PREVIEW,
         }

     @property

@@ -252,14 +320,11 @@ class ModelType(UnifiedModelType, Enum):
         r"""Returns whether this type of models is served by Groq."""
         return self in {
             ModelType.GROQ_LLAMA_3_1_8B,
-            ModelType.GROQ_LLAMA_3_1_70B,
-            ModelType.GROQ_LLAMA_3_1_405B,
             ModelType.GROQ_LLAMA_3_3_70B,
             ModelType.GROQ_LLAMA_3_3_70B_PREVIEW,
             ModelType.GROQ_LLAMA_3_8B,
             ModelType.GROQ_LLAMA_3_70B,
             ModelType.GROQ_MIXTRAL_8_7B,
-            ModelType.GROQ_GEMMA_7B_IT,
             ModelType.GROQ_GEMMA_2_9B_IT,
         }

@@ -277,7 +342,7 @@ class ModelType(UnifiedModelType, Enum):

     @property
     def is_sambanova(self) -> bool:
-        r"""Returns whether this type of
+        r"""Returns whether this type of model is served by SambaNova AI."""
         return self in {
             ModelType.SAMBA_LLAMA_3_1_8B,
             ModelType.SAMBA_LLAMA_3_1_70B,

@@ -326,9 +391,12 @@ class ModelType(UnifiedModelType, Enum):
             bool: Whether this type of models is gemini.
         """
         return self in {
+            ModelType.GEMINI_2_0_FLASH,
             ModelType.GEMINI_1_5_FLASH,
             ModelType.GEMINI_1_5_PRO,
-            ModelType.GEMINI_EXP_1114,
+            ModelType.GEMINI_2_0_FLASH_THINKING,
+            ModelType.GEMINI_2_0_PRO_EXP,
+            ModelType.GEMINI_2_0_FLASH_LITE_PREVIEW,
         }

     @property

@@ -391,10 +459,12 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.QWEN_MATH_TURBO,
             ModelType.QWEN_CODER_TURBO,
             ModelType.QWEN_2_5_CODER_32B,
+            ModelType.QWEN_2_5_VL_72B,
             ModelType.QWEN_2_5_72B,
             ModelType.QWEN_2_5_32B,
             ModelType.QWEN_2_5_14B,
             ModelType.QWEN_QWQ_32B,
+            ModelType.QWEN_QVQ_72B,
         }

     @property

@@ -413,6 +483,52 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.INTERNLM2_PRO_CHAT,
         }

+    @property
+    def is_moonshot(self) -> bool:
+        return self in {
+            ModelType.MOONSHOT_V1_8K,
+            ModelType.MOONSHOT_V1_32K,
+            ModelType.MOONSHOT_V1_128K,
+        }
+
+    @property
+    def is_sglang(self) -> bool:
+        return self in {
+            ModelType.SGLANG_LLAMA_3_1_8B,
+            ModelType.SGLANG_LLAMA_3_1_70B,
+            ModelType.SGLANG_LLAMA_3_1_405B,
+            ModelType.SGLANG_LLAMA_3_2_1B,
+            ModelType.SGLANG_MIXTRAL_NEMO,
+            ModelType.SGLANG_MISTRAL_7B,
+            ModelType.SGLANG_QWEN_2_5_7B,
+            ModelType.SGLANG_QWEN_2_5_32B,
+            ModelType.SGLANG_QWEN_2_5_72B,
+        }
+
+    @property
+    def is_siliconflow(self) -> bool:
+        return self in {
+            ModelType.SILICONFLOW_DEEPSEEK_V2_5,
+            ModelType.SILICONFLOW_DEEPSEEK_V3,
+            ModelType.SILICONFLOW_INTERN_LM2_5_20B_CHAT,
+            ModelType.SILICONFLOW_INTERN_LM2_5_7B_CHAT,
+            ModelType.SILICONFLOW_PRO_INTERN_LM2_5_7B_CHAT,
+            ModelType.SILICONFLOW_QWEN2_5_72B_INSTRUCT,
+            ModelType.SILICONFLOW_QWEN2_5_32B_INSTRUCT,
+            ModelType.SILICONFLOW_QWEN2_5_14B_INSTRUCT,
+            ModelType.SILICONFLOW_QWEN2_5_7B_INSTRUCT,
+            ModelType.SILICONFLOW_PRO_QWEN2_5_7B_INSTRUCT,
+            ModelType.SILICONFLOW_THUDM_GLM_4_9B_CHAT,
+            ModelType.SILICONFLOW_PRO_THUDM_GLM_4_9B_CHAT,
+        }
+
+    @property
+    def is_aiml(self) -> bool:
+        return self in {
+            ModelType.AIML_MIXTRAL_8X7B,
+            ModelType.AIML_MISTRAL_7B_INSTRUCT,
+        }
+
     @property
     def token_limit(self) -> int:
         r"""Returns the maximum token limit for a given model.

@@ -440,13 +556,15 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.GROQ_LLAMA_3_8B,
             ModelType.GROQ_LLAMA_3_70B,
             ModelType.GROQ_LLAMA_3_3_70B_PREVIEW,
-            ModelType.GROQ_GEMMA_7B_IT,
             ModelType.GROQ_GEMMA_2_9B_IT,
             ModelType.GLM_3_TURBO,
             ModelType.GLM_4,
             ModelType.QWEN_VL_PLUS,
             ModelType.NVIDIA_LLAMA3_70B,
             ModelType.TOGETHER_MISTRAL_7B,
+            ModelType.MOONSHOT_V1_8K,
+            ModelType.GLM_4V_FLASH,
+            ModelType.GLM_4_AIRX,
         }:
             return 8_192
         elif self in {

@@ -459,6 +577,8 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.YI_LARGE_RAG,
             ModelType.SAMBA_LLAMA_3_1_8B,
             ModelType.SAMBA_LLAMA_3_1_405B,
+            ModelType.GLM_4V_PLUS_0111,
+            ModelType.GLM_ZERO_PREVIEW,
         }:
             return 16_384
         elif self in {

@@ -474,11 +594,16 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.NVIDIA_MISTRAL_LARGE,
             ModelType.NVIDIA_MIXTRAL_8X7B,
             ModelType.QWEN_QWQ_32B,
+            ModelType.QWEN_QVQ_72B,
             ModelType.INTERNLM3_8B_INSTRUCT,
             ModelType.INTERNLM3_LATEST,
             ModelType.INTERNLM2_5_LATEST,
             ModelType.INTERNLM2_PRO_CHAT,
             ModelType.TOGETHER_MIXTRAL_8_7B,
+            ModelType.SGLANG_MISTRAL_7B,
+            ModelType.MOONSHOT_V1_32K,
+            ModelType.AIML_MIXTRAL_8X7B,
+            ModelType.AIML_MISTRAL_7B_INSTRUCT,
         }:
             return 32_768
         elif self in {

@@ -504,6 +629,7 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.MISTRAL_8B,
             ModelType.MISTRAL_3B,
             ModelType.QWEN_2_5_CODER_32B,
+            ModelType.QWEN_2_5_VL_72B,
             ModelType.QWEN_2_5_72B,
             ModelType.QWEN_2_5_32B,
             ModelType.QWEN_2_5_14B,

@@ -518,12 +644,21 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.NVIDIA_LLAMA3_3_70B_INSTRUCT,
             ModelType.GROQ_LLAMA_3_3_70B,
             ModelType.SAMBA_LLAMA_3_1_70B,
+            ModelType.SGLANG_LLAMA_3_1_8B,
+            ModelType.SGLANG_LLAMA_3_1_70B,
+            ModelType.SGLANG_LLAMA_3_1_405B,
+            ModelType.SGLANG_LLAMA_3_2_1B,
+            ModelType.SGLANG_MIXTRAL_NEMO,
+            ModelType.MOONSHOT_V1_128K,
+            ModelType.GLM_4_PLUS,
+            ModelType.GLM_4_AIR,
+            ModelType.GLM_4_AIR_0111,
+            ModelType.GLM_4_FLASHX,
+            ModelType.GLM_4_FLASH,
         }:
             return 128_000
         elif self in {
             ModelType.GROQ_LLAMA_3_1_8B,
-            ModelType.GROQ_LLAMA_3_1_70B,
-            ModelType.GROQ_LLAMA_3_1_405B,
             ModelType.QWEN_PLUS,
             ModelType.QWEN_TURBO,
             ModelType.QWEN_CODER_TURBO,

@@ -531,10 +666,14 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.TOGETHER_LLAMA_3_1_70B,
             ModelType.TOGETHER_LLAMA_3_1_405B,
             ModelType.TOGETHER_LLAMA_3_3_70B,
+            ModelType.SGLANG_QWEN_2_5_7B,
+            ModelType.SGLANG_QWEN_2_5_32B,
+            ModelType.SGLANG_QWEN_2_5_72B,
         }:
             return 131_072
         elif self in {
             ModelType.O1,
+            ModelType.O3_MINI,
             ModelType.CLAUDE_2_1,
             ModelType.CLAUDE_3_OPUS,
             ModelType.CLAUDE_3_SONNET,

@@ -549,9 +688,13 @@ class ModelType(UnifiedModelType, Enum):
         }:
             return 256_000
         elif self in {
+            ModelType.GEMINI_2_0_FLASH,
             ModelType.GEMINI_1_5_FLASH,
             ModelType.GEMINI_1_5_PRO,
-            ModelType.GEMINI_EXP_1114,
+            ModelType.GEMINI_2_0_FLASH_THINKING,
+            ModelType.GEMINI_2_0_FLASH_LITE_PREVIEW,
+            ModelType.GEMINI_2_0_PRO_EXP,  # Not given in doc, assume the same
+            ModelType.GLM_4_LONG,
         }:
             return 1_048_576
         elif self in {
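The `token_limit` chain buckets every model into a context-window size, so the new providers resolve without extra configuration. A few values pinned directly by the hunks above:

```python
from camel.types import ModelType

print(ModelType.MOONSHOT_V1_8K.token_limit)       # 8_192
print(ModelType.MOONSHOT_V1_128K.token_limit)     # 128_000
print(ModelType.SGLANG_QWEN_2_5_72B.token_limit)  # 131_072
print(ModelType.GLM_4_LONG.token_limit)           # 1_048_576
```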
@@ -567,6 +710,11 @@ class EmbeddingModelType(Enum):
     TEXT_EMBEDDING_3_SMALL = "text-embedding-3-small"
     TEXT_EMBEDDING_3_LARGE = "text-embedding-3-large"

+    JINA_EMBEDDINGS_V3 = "jina-embeddings-v3"
+    JINA_CLIP_V2 = "jina-clip-v2"
+    JINA_COLBERT_V2 = "jina-colbert-v2"
+    JINA_EMBEDDINGS_V2_BASE_CODE = "jina-embeddings-v2-base-code"
+
     MISTRAL_EMBED = "mistral-embed"

     @property

@@ -578,6 +726,16 @@ class EmbeddingModelType(Enum):
             EmbeddingModelType.TEXT_EMBEDDING_3_LARGE,
         }

+    @property
+    def is_jina(self) -> bool:
+        r"""Returns whether this type of models is an Jina model."""
+        return self in {
+            EmbeddingModelType.JINA_EMBEDDINGS_V3,
+            EmbeddingModelType.JINA_CLIP_V2,
+            EmbeddingModelType.JINA_COLBERT_V2,
+            EmbeddingModelType.JINA_EMBEDDINGS_V2_BASE_CODE,
+        }
+
     @property
     def is_mistral(self) -> bool:
         r"""Returns whether this type of models is an Mistral-released

@@ -589,7 +747,20 @@ class EmbeddingModelType(Enum):

     @property
     def output_dim(self) -> int:
-        if self is EmbeddingModelType.TEXT_EMBEDDING_ADA_2:
+        if self in {
+            EmbeddingModelType.JINA_COLBERT_V2,
+        }:
+            return 128
+        elif self in {
+            EmbeddingModelType.JINA_EMBEDDINGS_V2_BASE_CODE,
+        }:
+            return 768
+        elif self in {
+            EmbeddingModelType.JINA_EMBEDDINGS_V3,
+            EmbeddingModelType.JINA_CLIP_V2,
+        }:
+            return 1024
+        elif self is EmbeddingModelType.TEXT_EMBEDDING_ADA_2:
             return 1536
         elif self is EmbeddingModelType.TEXT_EMBEDDING_3_SMALL:
             return 1536
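`output_dim` now reports the vector width for each Jina embedding model, which downstream vector stores need when allocating collections. The values encoded above can be checked directly:

```python
from camel.types import EmbeddingModelType

assert EmbeddingModelType.JINA_COLBERT_V2.output_dim == 128
assert EmbeddingModelType.JINA_EMBEDDINGS_V2_BASE_CODE.output_dim == 768
assert EmbeddingModelType.JINA_EMBEDDINGS_V3.output_dim == 1024
assert EmbeddingModelType.JINA_CLIP_V2.is_jina  # grouped by the new property
```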
@@ -709,6 +880,9 @@ class ModelPlatformType(Enum):
     DEEPSEEK = "deepseek"
     SGLANG = "sglang"
     INTERNLM = "internlm"
+    MOONSHOT = "moonshot"
+    SILICONFLOW = "siliconflow"
+    AIML = "aiml"

     @property
     def is_openai(self) -> bool:

@@ -816,6 +990,21 @@ class ModelPlatformType(Enum):
         r"""Returns whether this platform is InternLM."""
         return self is ModelPlatformType.INTERNLM

+    @property
+    def is_moonshot(self) -> bool:
+        r"""Returns whether this platform is Moonshot model."""
+        return self is ModelPlatformType.MOONSHOT
+
+    @property
+    def is_siliconflow(self) -> bool:
+        r"""Returns whether this platform is SiliconFlow."""
+        return self is ModelPlatformType.SILICONFLOW
+
+    @property
+    def is_aiml(self) -> bool:
+        r"""Returns whether this platform is AIML."""
+        return self is ModelPlatformType.AIML
+

 class AudioModelType(Enum):
     TTS_1 = "tts-1"
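Together with the new `camel/models/moonshot_model.py`, `siliconflow_model.py`, and `aiml_model.py` backends and the `model_factory.py` changes in the file list, these platform values should make the providers reachable through `ModelFactory`. A sketch, assuming the factory wiring mirrors the existing platforms and the provider API key (e.g. `MOONSHOT_API_KEY`) is set in the environment:

```python
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

# Assumed wiring: platform + model type resolve to the new
# Moonshot backend added in this release.
model = ModelFactory.create(
    model_platform=ModelPlatformType.MOONSHOT,
    model_type=ModelType.MOONSHOT_V1_8K,
)
```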
camel/types/unified_model_type.py
CHANGED

@@ -118,6 +118,11 @@ class UnifiedModelType(str):
         r"""Returns whether the model is a InternLM model."""
         return True

+    @property
+    def is_moonshot(self) -> bool:
+        r"""Returns whether this platform is Moonshot model."""
+        return True
+
     @property
     def support_native_structured_output(self) -> bool:
         r"""Returns whether the model supports native structured output."""
camel/utils/__init__.py
CHANGED

@@ -14,6 +14,7 @@

 from .commons import (
     AgentOpsMeta,
+    BatchProcessor,
     agentops_decorator,
     api_keys_required,
     check_server_running,

@@ -33,16 +34,17 @@ from .commons import (
     is_docker_running,
     json_to_function_code,
     print_text_animated,
+    retry_on_error,
     text_extract_from_web,
     to_pascal,
     track_agent,
 )
 from .constants import Constants
+from .deduplication import DeduplicationResult, deduplicate_internally
 from .response_format import get_pydantic_model
 from .token_counting import (
     AnthropicTokenCounter,
     BaseTokenCounter,
-    GeminiTokenCounter,
     LiteLLMTokenCounter,
     MistralTokenCounter,
     OpenAITokenCounter,

@@ -69,7 +71,6 @@ __all__ = [
     "dependencies_required",
     "api_keys_required",
     "is_docker_running",
-    "GeminiTokenCounter",
     "MistralTokenCounter",
     "get_pydantic_major_version",
     "get_pydantic_object_schema",

@@ -82,4 +83,8 @@ __all__ = [
     "get_pydantic_model",
     "download_github_subdirectory",
     "generate_prompt_for_structured_output",
+    "deduplicate_internally",
+    "DeduplicationResult",
+    "retry_on_error",
+    "BatchProcessor",
 ]
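After this change the deduplication helpers, the retry decorator, and the batch processor are part of the public `camel.utils` surface. The import below is grounded in the `__all__` additions; the decorator usage mirrors the no-argument form the WhatsApp toolkit applies, while the signatures of `deduplicate_internally` and `BatchProcessor` live in `camel/utils/deduplication.py` and `camel/utils/commons.py`, which this diff does not show:

```python
from camel.utils import (
    BatchProcessor,
    DeduplicationResult,
    deduplicate_internally,
    retry_on_error,
)


@retry_on_error()  # same no-argument form used by WhatsAppToolkit above
def flaky_call() -> str:
    # Hypothetical stand-in for any operation prone to transient failures.
    return "ok"
```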