camel-ai 0.2.62__py3-none-any.whl → 0.2.64__py3-none-any.whl

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.


Files changed (46)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +76 -17
  3. camel/agents/mcp_agent.py +5 -1
  4. camel/configs/__init__.py +3 -0
  5. camel/configs/crynux_config.py +94 -0
  6. camel/interpreters/base.py +14 -1
  7. camel/interpreters/docker/Dockerfile +63 -7
  8. camel/interpreters/docker_interpreter.py +65 -7
  9. camel/interpreters/e2b_interpreter.py +23 -8
  10. camel/interpreters/internal_python_interpreter.py +30 -2
  11. camel/interpreters/ipython_interpreter.py +21 -3
  12. camel/interpreters/subprocess_interpreter.py +34 -2
  13. camel/memories/records.py +5 -3
  14. camel/models/__init__.py +2 -0
  15. camel/models/azure_openai_model.py +101 -25
  16. camel/models/cohere_model.py +65 -0
  17. camel/models/crynux_model.py +94 -0
  18. camel/models/deepseek_model.py +43 -1
  19. camel/models/gemini_model.py +50 -4
  20. camel/models/litellm_model.py +38 -0
  21. camel/models/mistral_model.py +66 -0
  22. camel/models/model_factory.py +10 -1
  23. camel/models/openai_compatible_model.py +81 -17
  24. camel/models/openai_model.py +86 -16
  25. camel/models/reka_model.py +69 -0
  26. camel/models/samba_model.py +69 -2
  27. camel/models/sglang_model.py +74 -2
  28. camel/models/watsonx_model.py +62 -0
  29. camel/societies/workforce/role_playing_worker.py +2 -2
  30. camel/societies/workforce/single_agent_worker.py +23 -0
  31. camel/societies/workforce/workforce.py +409 -7
  32. camel/storages/__init__.py +2 -0
  33. camel/storages/vectordb_storages/__init__.py +2 -0
  34. camel/storages/vectordb_storages/weaviate.py +714 -0
  35. camel/tasks/task.py +19 -10
  36. camel/toolkits/code_execution.py +37 -8
  37. camel/toolkits/mcp_toolkit.py +13 -2
  38. camel/types/enums.py +56 -1
  39. camel/types/unified_model_type.py +5 -0
  40. camel/utils/__init__.py +16 -0
  41. camel/utils/langfuse.py +258 -0
  42. camel/utils/mcp_client.py +84 -17
  43. {camel_ai-0.2.62.dist-info → camel_ai-0.2.64.dist-info}/METADATA +6 -1
  44. {camel_ai-0.2.62.dist-info → camel_ai-0.2.64.dist-info}/RECORD +46 -42
  45. {camel_ai-0.2.62.dist-info → camel_ai-0.2.64.dist-info}/WHEEL +0 -0
  46. {camel_ai-0.2.62.dist-info → camel_ai-0.2.64.dist-info}/licenses/LICENSE +0 -0
camel/models/reka_model.py

@@ -11,6 +11,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import os
 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union
 
 from pydantic import BaseModel
@@ -24,8 +25,19 @@ from camel.utils import (
     OpenAITokenCounter,
     api_keys_required,
     dependencies_required,
+    get_current_agent_session_id,
+    update_current_observation,
+    update_langfuse_trace,
 )
 
+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
+
 if TYPE_CHECKING:
     from reka.types import ChatMessage, ChatResponse
 
@@ -188,6 +200,7 @@ class RekaModel(BaseModelBackend):
         )
         return self._token_counter
 
+    @observe(as_type="generation")
     async def _arun(
         self,
         messages: List[OpenAIMessage],
@@ -203,6 +216,29 @@ class RekaModel(BaseModelBackend):
         Returns:
             ChatCompletion.
         """
+
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         reka_messages = self._convert_openai_to_reka_messages(messages)
 
         response = await self._async_client.chat.create(
@@ -213,6 +249,10 @@ class RekaModel(BaseModelBackend):
 
         openai_response = self._convert_reka_to_openai_response(response)
 
+        update_current_observation(
+            usage=openai_response.usage,
+        )
+
         # Add AgentOps LLM Event tracking
         if LLMEvent:
             llm_event = LLMEvent(
@@ -229,6 +269,7 @@ class RekaModel(BaseModelBackend):
 
         return openai_response
 
+    @observe(as_type="generation")
     def _run(
         self,
         messages: List[OpenAIMessage],
@@ -244,6 +285,30 @@ class RekaModel(BaseModelBackend):
         Returns:
             ChatCompletion.
         """
+
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         reka_messages = self._convert_openai_to_reka_messages(messages)
 
         response = self._client.chat.create(
@@ -254,6 +319,10 @@ class RekaModel(BaseModelBackend):
 
         openai_response = self._convert_reka_to_openai_response(response)
 
+        update_current_observation(
+            usage=openai_response.usage,
+        )
+
         # Add AgentOps LLM Event tracking
         if LLMEvent:
             llm_event = LLMEvent(
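
The import gate added above (and repeated in the three backends that follow) only pulls in langfuse's `observe` decorator when `LANGFUSE_ENABLED` is set; otherwise a stand-in is imported from `camel.utils`, which this release adds via `camel/utils/langfuse.py`. A minimal sketch of what such a no-op fallback can look like — an illustration under that assumption, not the shipped implementation:

    # Hypothetical no-op fallback, assuming the camel.utils shim simply
    # returns the decorated function unchanged when tracing is disabled.
    def observe(*_args, **_kwargs):
        def decorator(func):
            return func  # leave the function untouched
        return decorator

Because the fallback accepts and discards the same `as_type="generation"` argument, the `@observe(...)` call sites stay valid whether or not langfuse is installed.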
camel/models/samba_model.py

@@ -38,6 +38,9 @@ from camel.utils import (
     BaseTokenCounter,
     OpenAITokenCounter,
     api_keys_required,
+    get_current_agent_session_id,
+    update_current_observation,
+    update_langfuse_trace,
 )
 
 try:
@@ -48,6 +51,14 @@ try:
 except (ImportError, AttributeError):
     LLMEvent = None
 
+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
+
 
 class SambaModel(BaseModelBackend):
     r"""SambaNova service interface.
@@ -161,6 +172,7 @@ class SambaModel(BaseModelBackend):
             " SambaNova service"
         )
 
+    @observe(as_type="generation")
     async def _arun(  # type: ignore[misc]
         self,
         messages: List[OpenAIMessage],
@@ -178,13 +190,42 @@ class SambaModel(BaseModelBackend):
            `ChatCompletion` in the non-stream mode, or
            `AsyncStream[ChatCompletionChunk]` in the stream mode.
        """
+
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         if "tools" in self.model_config_dict:
             del self.model_config_dict["tools"]
         if self.model_config_dict.get("stream") is True:
             return await self._arun_streaming(messages)
         else:
-            return await self._arun_non_streaming(messages)
+            response = await self._arun_non_streaming(messages)
+            update_current_observation(
+                usage=response.usage,
+            )
+            return response
 
+    @observe(as_type="generation")
     def _run(  # type: ignore[misc]
         self,
         messages: List[OpenAIMessage],
@@ -202,12 +243,38 @@ class SambaModel(BaseModelBackend):
            `ChatCompletion` in the non-stream mode, or
            `Stream[ChatCompletionChunk]` in the stream mode.
        """
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         if "tools" in self.model_config_dict:
             del self.model_config_dict["tools"]
         if self.model_config_dict.get("stream") is True:
             return self._run_streaming(messages)
         else:
-            return self._run_non_streaming(messages)
+            response = self._run_non_streaming(messages)
+            update_current_observation(
+                usage=response.usage,
+            )
+            return response
 
     def _run_streaming(
         self, messages: List[OpenAIMessage]
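
Since the `LANGFUSE_ENABLED` check runs at module import time, the flag must be set before any model backend module is imported. A usage sketch — the key and host variable names follow the Langfuse SDK's documented configuration and are assumptions, not part of this diff:

    import os

    # Opt in before importing CAMEL model backends; each backend module
    # reads this flag once, when it is first imported.
    os.environ["LANGFUSE_ENABLED"] = "true"
    os.environ["LANGFUSE_PUBLIC_KEY"] = "pk-lf-..."  # from your Langfuse project
    os.environ["LANGFUSE_SECRET_KEY"] = "sk-lf-..."
    os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com"

    from camel.models import ModelFactory  # noqa: E402  (import after the flag)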
camel/models/sglang_model.py

@@ -29,7 +29,21 @@ from camel.types import (
     ChatCompletionChunk,
     ModelType,
 )
-from camel.utils import BaseTokenCounter, OpenAITokenCounter
+from camel.utils import (
+    BaseTokenCounter,
+    OpenAITokenCounter,
+    get_current_agent_session_id,
+    update_current_observation,
+    update_langfuse_trace,
+)
+
+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
 
 
 class SGLangModel(BaseModelBackend):
@@ -195,6 +209,7 @@ class SGLangModel(BaseModelBackend):
             "input into SGLang model backend."
         )
 
+    @observe(as_type='generation')
     async def _arun(
         self,
         messages: List[OpenAIMessage],
@@ -213,6 +228,28 @@ class SGLangModel(BaseModelBackend):
             `AsyncStream[ChatCompletionChunk]` in the stream mode.
         """
 
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         # Ensure server is running
         self._ensure_server_running()
 
@@ -230,9 +267,16 @@ class SGLangModel(BaseModelBackend):
                 model=self.model_type,
                 **self.model_config_dict,
             )
-
+        update_current_observation(
+            usage_details={
+                "prompt_tokens": response.usage.prompt_tokens,
+                "completion_tokens": response.usage.completion_tokens,
+                "total_tokens": response.usage.total_tokens,
+            },
+        )
         return response
 
+    @observe(as_type='generation')
     def _run(
         self,
         messages: List[OpenAIMessage],
@@ -250,6 +294,27 @@ class SGLangModel(BaseModelBackend):
             `ChatCompletion` in the non-stream mode, or
             `Stream[ChatCompletionChunk]` in the stream mode.
         """
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
 
         # Ensure server is running
         self._ensure_server_running()
@@ -268,6 +333,13 @@ class SGLangModel(BaseModelBackend):
                 model=self.model_type,
                 **self.model_config_dict,
             )
+        update_current_observation(
+            usage_details={
+                "prompt_tokens": response.usage.prompt_tokens,
+                "completion_tokens": response.usage.completion_tokens,
+                "total_tokens": response.usage.total_tokens,
+            },
+        )
 
         return response
 
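Note the asymmetry in how usage is reported: Reka, SambaNova, and WatsonX hand Langfuse the whole response usage object (`usage=...`), while SGLang flattens it into a `usage_details` dict. The two shapes carry the same token counts; a small helper showing the mapping, assuming an OpenAI-style `CompletionUsage` object (`usage_to_details` is an invented name for illustration):

    def usage_to_details(usage):
        # Flatten an OpenAI-style CompletionUsage into the dict shape the
        # SGLang hunks above report; None means no usage block was returned.
        if usage is None:
            return None
        return {
            "prompt_tokens": usage.prompt_tokens,
            "completion_tokens": usage.completion_tokens,
            "total_tokens": usage.total_tokens,
        }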
camel/models/watsonx_model.py

@@ -26,8 +26,19 @@ from camel.utils import (
     BaseTokenCounter,
     OpenAITokenCounter,
     api_keys_required,
+    get_current_agent_session_id,
+    update_current_observation,
+    update_langfuse_trace,
 )
 
+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
+
 logger = get_logger(__name__)
 
 
@@ -151,6 +162,7 @@ class WatsonXModel(BaseModelBackend):
 
         return request_config
 
+    @observe(as_type='generation')
     def _run(
         self,
         messages: List[OpenAIMessage],
@@ -170,6 +182,27 @@ class WatsonXModel(BaseModelBackend):
         Returns:
             ChatCompletion.
         """
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
         try:
             request_config = self._prepare_request(
                 messages, response_format, tools
@@ -183,12 +216,16 @@ class WatsonXModel(BaseModelBackend):
             )
 
             openai_response = self._to_openai_response(response)
+            update_current_observation(
+                usage=openai_response.usage,
+            )
             return openai_response
 
         except Exception as e:
             logger.error(f"Unexpected error when calling WatsonX API: {e!s}")
             raise
 
+    @observe(as_type='generation')
     async def _arun(
         self,
         messages: List[OpenAIMessage],
@@ -208,6 +245,28 @@ class WatsonXModel(BaseModelBackend):
         Returns:
             ChatCompletion.
         """
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         try:
             request_config = self._prepare_request(
                 messages, response_format, tools
@@ -221,6 +280,9 @@ class WatsonXModel(BaseModelBackend):
             )
 
             openai_response = self._to_openai_response(response)
+            update_current_observation(
+                usage=openai_response.usage,
+            )
             return openai_response
 
         except Exception as e:
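
The same two-step preamble is now inlined in the `_run`/`_arun` pair of all four backends above. Factored out, it would amount to roughly the following — a sketch only; `record_generation_start` is an invented name and no such helper ships in this release:

    from camel.utils import (
        get_current_agent_session_id,
        update_current_observation,
        update_langfuse_trace,
    )

    def record_generation_start(model_type, model_config_dict, messages, tools):
        # Log the request inputs, then attach the current agent session
        # (if any) to the active Langfuse trace.
        update_current_observation(
            input={"messages": messages, "tools": tools},
            model=str(model_type),
            model_parameters=model_config_dict,
        )
        agent_session_id = get_current_agent_session_id()
        if agent_session_id:
            update_langfuse_trace(
                session_id=agent_session_id,
                metadata={
                    "source": "camel",
                    "agent_id": agent_session_id,
                    "agent_type": "camel_chat_agent",
                    "model_type": str(model_type),
                },
                tags=["CAMEL-AI", str(model_type)],
            )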
camel/societies/workforce/role_playing_worker.py

@@ -48,7 +48,7 @@ class RolePlayingWorker(Worker):
             initialize the summarize agent, like the model name, etc.
             (default: :obj:`None`)
         chat_turn_limit (int): The maximum number of chat turns in the role
-            playing. (default: :obj:`3`)
+            playing. (default: :obj:`20`)
     """
 
     def __init__(
@@ -59,7 +59,7 @@ class RolePlayingWorker(Worker):
         assistant_agent_kwargs: Optional[Dict] = None,
         user_agent_kwargs: Optional[Dict] = None,
         summarize_agent_kwargs: Optional[Dict] = None,
-        chat_turn_limit: int = 3,
+        chat_turn_limit: int = 20,
     ) -> None:
         super().__init__(description)
         self.summarize_agent_kwargs = summarize_agent_kwargs
camel/societies/workforce/single_agent_worker.py

@@ -13,6 +13,7 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from __future__ import annotations
 
+import datetime
 import json
 from typing import Any, List
 
@@ -83,6 +84,28 @@ class SingleAgentWorker(Worker):
             )
             return TaskState.FAILED
 
+        # Populate additional_info with worker attempt details
+        if task.additional_info is None:
+            task.additional_info = {}
+
+        # Create worker attempt details with descriptive keys
+        worker_attempt_details = {
+            "agent_id": getattr(
+                self.worker, "agent_id", self.worker.role_name
+            ),
+            "timestamp": str(datetime.datetime.now()),
+            "description": f"Attempt by "
+            f"{getattr(self.worker, 'agent_id', self.worker.role_name)} "
+            f"to process task {task.content}",
+            "response_content": response.msg.content,
+            "tool_calls": response.info["tool_calls"],
+        }
+
+        # Store the worker attempt in additional_info
+        if "worker_attempts" not in task.additional_info:
+            task.additional_info["worker_attempts"] = []
+        task.additional_info["worker_attempts"].append(worker_attempt_details)
+
         print(f"======\n{Fore.GREEN}Reply from {self}:{Fore.RESET}")
 
         result_dict = json.loads(response.msg.content)
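
With this change each worker attempt is recorded on the task itself, so a task that fails and is retried carries its full attempt history. A sketch of reading it back after processing — illustrative only, assuming `task` is a `Task` that a workforce has already run:

    # Inspect the attempt history accumulated on a processed task.
    for attempt in (task.additional_info or {}).get("worker_attempts", []):
        print(attempt["timestamp"], attempt["agent_id"])
        print(attempt["response_content"][:200])  # first 200 chars of the reply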