camel-ai 0.2.61__py3-none-any.whl → 0.2.64__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +75 -16
- camel/agents/mcp_agent.py +10 -6
- camel/configs/__init__.py +3 -0
- camel/configs/crynux_config.py +94 -0
- camel/{data_collector → data_collectors}/alpaca_collector.py +1 -1
- camel/{data_collector → data_collectors}/sharegpt_collector.py +1 -1
- camel/interpreters/base.py +14 -1
- camel/interpreters/docker/Dockerfile +63 -7
- camel/interpreters/docker_interpreter.py +65 -7
- camel/interpreters/e2b_interpreter.py +23 -8
- camel/interpreters/internal_python_interpreter.py +30 -2
- camel/interpreters/ipython_interpreter.py +21 -3
- camel/interpreters/subprocess_interpreter.py +34 -2
- camel/memories/records.py +5 -3
- camel/models/__init__.py +2 -0
- camel/models/azure_openai_model.py +101 -25
- camel/models/cohere_model.py +65 -0
- camel/models/crynux_model.py +94 -0
- camel/models/deepseek_model.py +43 -1
- camel/models/gemini_model.py +50 -4
- camel/models/litellm_model.py +38 -0
- camel/models/mistral_model.py +66 -0
- camel/models/model_factory.py +10 -1
- camel/models/openai_compatible_model.py +81 -17
- camel/models/openai_model.py +86 -16
- camel/models/reka_model.py +69 -0
- camel/models/samba_model.py +69 -2
- camel/models/sglang_model.py +74 -2
- camel/models/watsonx_model.py +62 -0
- camel/retrievers/auto_retriever.py +20 -1
- camel/{runtime → runtimes}/daytona_runtime.py +1 -1
- camel/{runtime → runtimes}/docker_runtime.py +1 -1
- camel/{runtime → runtimes}/llm_guard_runtime.py +2 -2
- camel/{runtime → runtimes}/remote_http_runtime.py +1 -1
- camel/{runtime → runtimes}/ubuntu_docker_runtime.py +1 -1
- camel/societies/workforce/base.py +7 -3
- camel/societies/workforce/role_playing_worker.py +2 -2
- camel/societies/workforce/single_agent_worker.py +25 -1
- camel/societies/workforce/worker.py +5 -3
- camel/societies/workforce/workforce.py +409 -7
- camel/storages/__init__.py +2 -0
- camel/storages/vectordb_storages/__init__.py +2 -0
- camel/storages/vectordb_storages/weaviate.py +714 -0
- camel/tasks/task.py +19 -10
- camel/toolkits/__init__.py +2 -0
- camel/toolkits/code_execution.py +37 -8
- camel/toolkits/file_write_toolkit.py +4 -2
- camel/toolkits/mcp_toolkit.py +480 -733
- camel/toolkits/pptx_toolkit.py +777 -0
- camel/types/enums.py +56 -1
- camel/types/unified_model_type.py +5 -0
- camel/utils/__init__.py +16 -0
- camel/utils/langfuse.py +258 -0
- camel/utils/mcp_client.py +1046 -0
- {camel_ai-0.2.61.dist-info → camel_ai-0.2.64.dist-info}/METADATA +9 -1
- {camel_ai-0.2.61.dist-info → camel_ai-0.2.64.dist-info}/RECORD +68 -62
- /camel/{data_collector → data_collectors}/__init__.py +0 -0
- /camel/{data_collector → data_collectors}/base.py +0 -0
- /camel/{runtime → runtimes}/__init__.py +0 -0
- /camel/{runtime → runtimes}/api.py +0 -0
- /camel/{runtime → runtimes}/base.py +0 -0
- /camel/{runtime → runtimes}/configs.py +0 -0
- /camel/{runtime → runtimes}/utils/__init__.py +0 -0
- /camel/{runtime → runtimes}/utils/function_risk_toolkit.py +0 -0
- /camel/{runtime → runtimes}/utils/ignore_risk_toolkit.py +0 -0
- {camel_ai-0.2.61.dist-info → camel_ai-0.2.64.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.61.dist-info → camel_ai-0.2.64.dist-info}/licenses/LICENSE +0 -0
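Note that this release renames two packages (camel/runtime → camel/runtimes, camel/data_collector → camel/data_collectors), which breaks any downstream code importing the old paths. A minimal before/after sketch (the collector class name is an assumption for illustration, not confirmed by this diff):

# Before (0.2.61) — old module paths:
# from camel.runtime import BaseRuntime
# from camel.data_collector.alpaca_collector import AlpacaDataCollector

# After (0.2.64) — pluralized package names:
from camel.runtimes import BaseRuntime
from camel.data_collectors.alpaca_collector import AlpacaDataCollector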
camel/models/openai_model.py
CHANGED

@@ -30,8 +30,20 @@ from camel.utils import (
     BaseTokenCounter,
     OpenAITokenCounter,
     api_keys_required,
+    get_current_agent_session_id,
+    is_langfuse_available,
+    update_langfuse_trace,
 )
 
+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
+
+
 UNSUPPORTED_PARAMS = {
     "temperature",
     "top_p",
@@ -90,18 +102,35 @@ class OpenAIModel(BaseModelBackend):
             model_type, model_config_dict, api_key, url, token_counter, timeout
         )
 
-        self._client = OpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            base_url=self._url,
-            api_key=self._api_key,
-        )
-        self._async_client = AsyncOpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            base_url=self._url,
-            api_key=self._api_key,
-        )
+        if is_langfuse_available():
+            from langfuse.openai import AsyncOpenAI as LangfuseAsyncOpenAI
+            from langfuse.openai import OpenAI as LangfuseOpenAI
+
+            self._client = LangfuseOpenAI(
+                timeout=self._timeout,
+                max_retries=3,
+                base_url=self._url,
+                api_key=self._api_key,
+            )
+            self._async_client = LangfuseAsyncOpenAI(
+                timeout=self._timeout,
+                max_retries=3,
+                base_url=self._url,
+                api_key=self._api_key,
+            )
+        else:
+            self._client = OpenAI(
+                timeout=self._timeout,
+                max_retries=3,
+                base_url=self._url,
+                api_key=self._api_key,
+            )
+            self._async_client = AsyncOpenAI(
+                timeout=self._timeout,
+                max_retries=3,
+                base_url=self._url,
+                api_key=self._api_key,
+            )
 
     def _sanitize_config(self, config_dict: Dict[str, Any]) -> Dict[str, Any]:
         r"""Sanitize the model configuration for O1 models."""
@@ -185,6 +214,7 @@ class OpenAIModel(BaseModelBackend):
         self._token_counter = OpenAITokenCounter(self.model_type)
         return self._token_counter
 
+    @observe()
     def _run(
         self,
         messages: List[OpenAIMessage],
@@ -206,15 +236,35 @@ class OpenAIModel(BaseModelBackend):
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
+
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         messages = self._adapt_messages_for_o1_models(messages)
         response_format = response_format or self.model_config_dict.get(
             "response_format", None
         )
         if response_format:
-            return self._request_parse(messages, response_format, tools)
+            result: Union[ChatCompletion, Stream[ChatCompletionChunk]] = (
+                self._request_parse(messages, response_format, tools)
+            )
         else:
-            return self._request_chat_completion(messages, tools)
+            result = self._request_chat_completion(messages, tools)
+
+        return result
 
+    @observe()
     async def _arun(
         self,
         messages: List[OpenAIMessage],
@@ -236,13 +286,33 @@ class OpenAIModel(BaseModelBackend):
                 `ChatCompletion` in the non-stream mode, or
                 `AsyncStream[ChatCompletionChunk]` in the stream mode.
         """
+
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
+        messages = self._adapt_messages_for_o1_models(messages)
         response_format = response_format or self.model_config_dict.get(
             "response_format", None
         )
         if response_format:
-            return await self._arequest_parse(messages, response_format, tools)
+            result: Union[
+                ChatCompletion, AsyncStream[ChatCompletionChunk]
+            ] = await self._arequest_parse(messages, response_format, tools)
         else:
-            return await self._arequest_chat_completion(messages, tools)
+            result = await self._arequest_chat_completion(messages, tools)
+
+        return result
 
     def _request_chat_completion(
         self,
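The gated import above requires that camel.utils expose an observe fallback so the decorators are harmless when Langfuse is disabled or not installed (presumably provided by the new camel/utils/langfuse.py). A minimal sketch of such a no-op decorator, as an illustration of the pattern rather than the shipped implementation:

# A no-op stand-in with the same call shape as langfuse's observe,
# so `@observe()` and `@observe(as_type="generation")` both work
# when tracing is off. (Sketch only; camel.utils ships its own.)
from functools import wraps

def observe(*_args, **_kwargs):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        return wrapper
    return decorator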
camel/models/reka_model.py
CHANGED

@@ -11,6 +11,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import os
 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union
 
 from pydantic import BaseModel
@@ -24,8 +25,19 @@ from camel.utils import (
     OpenAITokenCounter,
     api_keys_required,
     dependencies_required,
+    get_current_agent_session_id,
+    update_current_observation,
+    update_langfuse_trace,
 )
 
+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
+
 if TYPE_CHECKING:
     from reka.types import ChatMessage, ChatResponse
 
@@ -188,6 +200,7 @@ class RekaModel(BaseModelBackend):
         )
         return self._token_counter
 
+    @observe(as_type="generation")
     async def _arun(
         self,
         messages: List[OpenAIMessage],
@@ -203,6 +216,29 @@ class RekaModel(BaseModelBackend):
         Returns:
             ChatCompletion.
         """
+
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         reka_messages = self._convert_openai_to_reka_messages(messages)
 
         response = await self._async_client.chat.create(
@@ -213,6 +249,10 @@ class RekaModel(BaseModelBackend):
 
         openai_response = self._convert_reka_to_openai_response(response)
 
+        update_current_observation(
+            usage=openai_response.usage,
+        )
+
         # Add AgentOps LLM Event tracking
         if LLMEvent:
             llm_event = LLMEvent(
@@ -229,6 +269,7 @@ class RekaModel(BaseModelBackend):
 
         return openai_response
 
+    @observe(as_type="generation")
     def _run(
         self,
         messages: List[OpenAIMessage],
@@ -244,6 +285,30 @@ class RekaModel(BaseModelBackend):
         Returns:
             ChatCompletion.
         """
+
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         reka_messages = self._convert_openai_to_reka_messages(messages)
 
         response = self._client.chat.create(
@@ -254,6 +319,10 @@ class RekaModel(BaseModelBackend):
 
         openai_response = self._convert_reka_to_openai_response(response)
 
+        update_current_observation(
+            usage=openai_response.usage,
+        )
+
         # Add AgentOps LLM Event tracking
         if LLMEvent:
             llm_event = LLMEvent(
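As with the OpenAI backend, tracing only activates when the environment opts in before camel is imported, since the observe import is resolved at module load time. A hypothetical setup (the three LANGFUSE_* credential variables are standard Langfuse settings, not defined by this diff):

import os

# Must be set before importing camel.models, because the
# `if os.environ.get("LANGFUSE_ENABLED", ...)` gate runs at import time.
os.environ["LANGFUSE_ENABLED"] = "true"
os.environ["LANGFUSE_PUBLIC_KEY"] = "pk-lf-..."  # placeholder credentials
os.environ["LANGFUSE_SECRET_KEY"] = "sk-lf-..."
os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com"

from camel.models import ModelFactory  # import after the flag is set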
camel/models/samba_model.py
CHANGED

@@ -38,6 +38,9 @@ from camel.utils import (
     BaseTokenCounter,
     OpenAITokenCounter,
     api_keys_required,
+    get_current_agent_session_id,
+    update_current_observation,
+    update_langfuse_trace,
 )
 
 try:
@@ -48,6 +51,14 @@ try:
 except (ImportError, AttributeError):
     LLMEvent = None
 
+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
+
 
 class SambaModel(BaseModelBackend):
     r"""SambaNova service interface.
@@ -161,6 +172,7 @@ class SambaModel(BaseModelBackend):
             " SambaNova service"
         )
 
+    @observe(as_type="generation")
     async def _arun(  # type: ignore[misc]
         self,
         messages: List[OpenAIMessage],
@@ -178,13 +190,42 @@ class SambaModel(BaseModelBackend):
             `ChatCompletion` in the non-stream mode, or
             `AsyncStream[ChatCompletionChunk]` in the stream mode.
         """
+
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         if "tools" in self.model_config_dict:
             del self.model_config_dict["tools"]
         if self.model_config_dict.get("stream") is True:
             return await self._arun_streaming(messages)
         else:
-            return await self._arun_non_streaming(messages)
+            response = await self._arun_non_streaming(messages)
+            update_current_observation(
+                usage=response.usage,
+            )
+            return response
 
+    @observe(as_type="generation")
     def _run(  # type: ignore[misc]
         self,
         messages: List[OpenAIMessage],
@@ -202,12 +243,38 @@ class SambaModel(BaseModelBackend):
             `ChatCompletion` in the non-stream mode, or
             `Stream[ChatCompletionChunk]` in the stream mode.
         """
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         if "tools" in self.model_config_dict:
             del self.model_config_dict["tools"]
         if self.model_config_dict.get("stream") is True:
             return self._run_streaming(messages)
         else:
-            return self._run_non_streaming(messages)
+            response = self._run_non_streaming(messages)
+            update_current_observation(
+                usage=response.usage,
+            )
+            return response
 
     def _run_streaming(
         self, messages: List[OpenAIMessage]
camel/models/sglang_model.py
CHANGED

@@ -29,7 +29,21 @@ from camel.types import (
     ChatCompletionChunk,
     ModelType,
 )
-from camel.utils import BaseTokenCounter, OpenAITokenCounter
+from camel.utils import (
+    BaseTokenCounter,
+    OpenAITokenCounter,
+    get_current_agent_session_id,
+    update_current_observation,
+    update_langfuse_trace,
+)
+
+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
 
 
 class SGLangModel(BaseModelBackend):
@@ -195,6 +209,7 @@ class SGLangModel(BaseModelBackend):
                 "input into SGLang model backend."
             )
 
+    @observe(as_type='generation')
     async def _arun(
         self,
         messages: List[OpenAIMessage],
@@ -213,6 +228,28 @@ class SGLangModel(BaseModelBackend):
             `AsyncStream[ChatCompletionChunk]` in the stream mode.
         """
 
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         # Ensure server is running
         self._ensure_server_running()
 
@@ -230,9 +267,16 @@ class SGLangModel(BaseModelBackend):
                 model=self.model_type,
                 **self.model_config_dict,
             )
-
+            update_current_observation(
+                usage_details={
+                    "prompt_tokens": response.usage.prompt_tokens,
+                    "completion_tokens": response.usage.completion_tokens,
+                    "total_tokens": response.usage.total_tokens,
+                },
+            )
             return response
 
+    @observe(as_type='generation')
     def _run(
         self,
         messages: List[OpenAIMessage],
@@ -250,6 +294,27 @@ class SGLangModel(BaseModelBackend):
             `ChatCompletion` in the non-stream mode, or
             `Stream[ChatCompletionChunk]` in the stream mode.
         """
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
 
         # Ensure server is running
         self._ensure_server_running()
@@ -268,6 +333,13 @@ class SGLangModel(BaseModelBackend):
                 model=self.model_type,
                 **self.model_config_dict,
            )
+            update_current_observation(
+                usage_details={
+                    "prompt_tokens": response.usage.prompt_tokens,
+                    "completion_tokens": response.usage.completion_tokens,
+                    "total_tokens": response.usage.total_tokens,
+                },
+            )
 
             return response
 
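Unlike the other backends, SGLang reports token usage as an explicit usage_details dict rather than passing the usage object through. The same mapping, factored into a standalone helper for clarity (a hypothetical function, not part of the diff):

from openai.types import CompletionUsage

def usage_details(usage: CompletionUsage) -> dict:
    # Mirrors the inline dict built in SGLangModel._run/_arun above.
    return {
        "prompt_tokens": usage.prompt_tokens,
        "completion_tokens": usage.completion_tokens,
        "total_tokens": usage.total_tokens,
    }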
camel/models/watsonx_model.py
CHANGED

@@ -26,8 +26,19 @@ from camel.utils import (
     BaseTokenCounter,
     OpenAITokenCounter,
     api_keys_required,
+    get_current_agent_session_id,
+    update_current_observation,
+    update_langfuse_trace,
 )
 
+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
+
 logger = get_logger(__name__)
 
 
@@ -151,6 +162,7 @@ class WatsonXModel(BaseModelBackend):
 
         return request_config
 
+    @observe(as_type='generation')
     def _run(
         self,
         messages: List[OpenAIMessage],
@@ -170,6 +182,27 @@ class WatsonXModel(BaseModelBackend):
         Returns:
             ChatCompletion.
         """
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
         try:
             request_config = self._prepare_request(
                 messages, response_format, tools
@@ -183,12 +216,16 @@ class WatsonXModel(BaseModelBackend):
             )
 
             openai_response = self._to_openai_response(response)
+            update_current_observation(
+                usage=openai_response.usage,
+            )
             return openai_response
 
         except Exception as e:
             logger.error(f"Unexpected error when calling WatsonX API: {e!s}")
             raise
 
+    @observe(as_type='generation')
     async def _arun(
         self,
         messages: List[OpenAIMessage],
@@ -208,6 +245,28 @@ class WatsonXModel(BaseModelBackend):
         Returns:
             ChatCompletion.
         """
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         try:
             request_config = self._prepare_request(
                 messages, response_format, tools
@@ -221,6 +280,9 @@ class WatsonXModel(BaseModelBackend):
             )
 
             openai_response = self._to_openai_response(response)
+            update_current_observation(
+                usage=openai_response.usage,
+            )
             return openai_response
 
         except Exception as e:
camel/retrievers/auto_retriever.py
CHANGED

@@ -128,12 +128,31 @@ class AutoRetriever:
         Returns:
             str: A sanitized, valid collection name suitable for use.
         """
+        import hashlib
+        import os
+
         from unstructured.documents.elements import Element
 
         if isinstance(content, Element):
             content = content.metadata.file_directory or str(uuid.uuid4())
 
-
+        # For file paths, use a combination of directory hash and filename
+        if os.path.isfile(content):
+            # Get directory and filename
+            directory = os.path.dirname(content)
+            filename = os.path.basename(content)
+            # Create a short hash of the directory path
+            dir_hash = hashlib.md5(directory.encode()).hexdigest()[:6]
+            # Get filename without extension and remove special chars
+            base_name = os.path.splitext(filename)[0]
+            clean_name = re.sub(r'[^a-zA-Z0-9]', '', base_name)[:10]
+            # Combine for a unique name
+            collection_name = f"{clean_name}_{dir_hash}"
+        else:
+            # For URL content
+            content_hash = hashlib.md5(content.encode()).hexdigest()[:6]
+            clean_content = re.sub(r'[^a-zA-Z0-9]', '', content)[-10:]
+            collection_name = f"{clean_content}_{content_hash}"
 
         # Ensure the first character is either an underscore or a letter for
         # Milvus
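A worked example of the file-path branch above (the path is illustrative; the real method also applies the Milvus-specific sanitization that follows):

import hashlib
import os
import re

path = "/data/docs/report-2024.pdf"
directory = os.path.dirname(path)                         # "/data/docs"
base_name = os.path.splitext(os.path.basename(path))[0]   # "report-2024"
dir_hash = hashlib.md5(directory.encode()).hexdigest()[:6]
clean_name = re.sub(r'[^a-zA-Z0-9]', '', base_name)[:10]  # "report2024"
print(f"{clean_name}_{dir_hash}")  # e.g. "report2024_" + 6 hex chars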
@@ -21,7 +21,7 @@ from typing import Any, Dict, List, Optional, Union
 from pydantic import BaseModel
 
 from camel.logger import get_logger
-from camel.runtime import BaseRuntime
+from camel.runtimes import BaseRuntime
 from camel.toolkits.function_tool import FunctionTool
 
 logger = get_logger(__name__)
camel/{runtime → runtimes}/llm_guard_runtime.py
CHANGED

@@ -19,8 +19,8 @@ from typing import List, Optional, Union
 from camel.agents import ChatAgent
 from camel.configs import ChatGPTConfig
 from camel.models import BaseModelBackend, ModelFactory
-from camel.runtime import BaseRuntime
-from camel.runtime.utils import FunctionRiskToolkit, IgnoreRiskToolkit
+from camel.runtimes import BaseRuntime
+from camel.runtimes.utils import FunctionRiskToolkit, IgnoreRiskToolkit
 from camel.toolkits import FunctionTool
 from camel.types import ModelPlatformType, ModelType
 
@@ -24,7 +24,7 @@ from typing import Any, Dict, List, Optional, Union
 import requests
 from pydantic import BaseModel
 
-from camel.runtime import BaseRuntime
+from camel.runtimes import BaseRuntime
 from camel.toolkits.function_tool import FunctionTool
 
 logger = logging.getLogger(__name__)