camel-ai 0.2.72a4__py3-none-any.whl → 0.2.72a5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

camel/__init__.py CHANGED
@@ -14,7 +14,7 @@
 
  from camel.logger import disable_logging, enable_logging, set_log_level
 
- __version__ = '0.2.72a4'
+ __version__ = '0.2.72a5'
 
  __all__ = [
  '__version__',

camel/agents/chat_agent.py CHANGED
@@ -1264,7 +1264,11 @@ class ChatAgent(BaseAgent):
  openai_message: OpenAIMessage = {"role": "user", "content": prompt}
  # Explicitly set the tools to empty list to avoid calling tools
  response = self._get_model_response(
- [openai_message], 0, response_format, []
+ openai_messages=[openai_message],
+ num_tokens=0,
+ response_format=response_format,
+ tool_schemas=[],
+ prev_num_openai_messages=0,
  )
  message.content = response.output_messages[0].content
  if not self._try_format_message(message, response_format):
@@ -1292,7 +1296,11 @@ class ChatAgent(BaseAgent):
  prompt = SIMPLE_FORMAT_PROMPT.format(content=message.content)
  openai_message: OpenAIMessage = {"role": "user", "content": prompt}
  response = await self._aget_model_response(
- [openai_message], 0, response_format, []
+ openai_messages=[openai_message],
+ num_tokens=0,
+ response_format=response_format,
+ tool_schemas=[],
+ prev_num_openai_messages=0,
  )
  message.content = response.output_messages[0].content
  self._try_format_message(message, response_format)
@@ -1374,7 +1382,8 @@ class ChatAgent(BaseAgent):
 
  # Initialize token usage tracker
  step_token_usage = self._create_token_usage_tracker()
- iteration_count = 0
+ iteration_count: int = 0
+ prev_num_openai_messages: int = 0
 
  while True:
  if self.pause_event is not None and not self.pause_event.is_set():
@@ -1391,10 +1400,13 @@
  # Get response from model backend
  response = self._get_model_response(
  openai_messages,
- accumulated_context_tokens, # Cumulative context tokens
- response_format,
- self._get_full_tool_schemas(),
+ num_tokens=num_tokens,
+ current_iteration=iteration_count,
+ response_format=response_format,
+ tool_schemas=self._get_full_tool_schemas(),
+ prev_num_openai_messages=prev_num_openai_messages,
  )
+ prev_num_openai_messages = len(openai_messages)
  iteration_count += 1
 
  # Accumulate API token usage
@@ -1405,6 +1417,9 @@
  # Terminate Agent if stop_event is set
  if self.stop_event and self.stop_event.is_set():
  # Use the _step_terminate to terminate the agent with reason
+ logger.info(
+ f"Termination triggered at iteration " f"{iteration_count}"
+ )
  return self._step_terminate(
  accumulated_context_tokens,
  tool_call_records,
@@ -1439,6 +1454,7 @@
  self.max_iteration is not None
  and iteration_count >= self.max_iteration
  ):
+ logger.info(f"Max iteration reached: {iteration_count}")
  break
 
  # If we're still here, continue the loop
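
The two counters introduced above drive incremental logging across the step loop: `iteration_count` is forwarded to the model call as `current_iteration`, and `prev_num_openai_messages` records how many messages had already been sent (and logged) before the current call. A minimal sketch of that bookkeeping follows; `fetch_messages` and `call_model` are illustrative stand-ins for the agent internals, not camel APIs.

    from typing import Any, Dict, List

    def step_loop(fetch_messages, call_model, max_iteration: int = 10) -> None:
        iteration_count: int = 0
        prev_num_openai_messages: int = 0  # messages already logged earlier
        while True:
            openai_messages: List[Dict[str, Any]] = fetch_messages()
            call_model(
                openai_messages,
                current_iteration=iteration_count,
                prev_num_openai_messages=prev_num_openai_messages,
            )
            # Only messages[prev_num_openai_messages:] are new, so the next
            # call can log just the delta instead of the whole history.
            prev_num_openai_messages = len(openai_messages)
            iteration_count += 1
            if iteration_count >= max_iteration:
                break
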
@@ -1564,7 +1580,8 @@ class ChatAgent(BaseAgent):
 
  # Initialize token usage tracker
  step_token_usage = self._create_token_usage_tracker()
- iteration_count = 0
+ iteration_count: int = 0
+ prev_num_openai_messages: int = 0
  while True:
  if self.pause_event is not None and not self.pause_event.is_set():
  await self.pause_event.wait()
@@ -1578,10 +1595,13 @@
 
  response = await self._aget_model_response(
  openai_messages,
- accumulated_context_tokens,
- response_format,
- self._get_full_tool_schemas(),
+ num_tokens=num_tokens,
+ current_iteration=iteration_count,
+ response_format=response_format,
+ tool_schemas=self._get_full_tool_schemas(),
+ prev_num_openai_messages=prev_num_openai_messages,
  )
+ prev_num_openai_messages = len(openai_messages)
  iteration_count += 1
 
  # Accumulate API token usage
@@ -1592,6 +1612,9 @@
  # Terminate Agent if stop_event is set
  if self.stop_event and self.stop_event.is_set():
  # Use the _step_terminate to terminate the agent with reason
+ logger.info(
+ f"Termination triggered at iteration " f"{iteration_count}"
+ )
  return self._step_terminate(
  accumulated_context_tokens,
  tool_call_records,
@@ -1900,14 +1923,32 @@ class ChatAgent(BaseAgent):
  stripped_messages.append(msg)
  return stripped_messages
 
+ @observe()
  def _get_model_response(
  self,
  openai_messages: List[OpenAIMessage],
  num_tokens: int,
+ current_iteration: int = 0,
  response_format: Optional[Type[BaseModel]] = None,
  tool_schemas: Optional[List[Dict[str, Any]]] = None,
+ prev_num_openai_messages: int = 0,
  ) -> ModelResponse:
- r"""Internal function for agent step model response."""
+ r"""Internal function for agent step model response.
+ Args:
+ openai_messages (List[OpenAIMessage]): The OpenAI
+ messages to process.
+ num_tokens (int): The number of tokens in the context.
+ current_iteration (int): The current iteration of the step.
+ response_format (Optional[Type[BaseModel]]): The response
+ format to use.
+ tool_schemas (Optional[List[Dict[str, Any]]]): The tool
+ schemas to use.
+ prev_num_openai_messages (int): The number of openai messages
+ logged in the previous iteration.
+
+ Returns:
+ ModelResponse: The model response.
+ """
 
  response = None
  try:
@@ -1920,7 +1961,8 @@ class ChatAgent(BaseAgent):
  openai_messages
  ):
  logger.warning(
- "Model appears to not support vision. Retrying without images." # noqa: E501
+ "Model appears to not support vision."
+ "Retrying without images."
  )
  try:
  stripped_messages = self._strip_images_from_messages(
@@ -1937,6 +1979,7 @@ class ChatAgent(BaseAgent):
  if not response:
  logger.error(
  f"An error occurred while running model "
+ f"iteration {current_iteration}, "
  f"{self.model_backend.model_type}, "
  f"index: {self.model_backend.current_model_index}",
  exc_info=exc,
@@ -1955,11 +1998,12 @@ class ChatAgent(BaseAgent):
  )
 
  sanitized_messages = self._sanitize_messages_for_logging(
- openai_messages
+ openai_messages, prev_num_openai_messages
  )
  logger.info(
  f"Model {self.model_backend.model_type}, "
  f"index {self.model_backend.current_model_index}, "
+ f"iteration {current_iteration}, "
  f"processed these messages: {sanitized_messages}"
  )
  if not isinstance(response, ChatCompletion):
@@ -1973,10 +2017,27 @@ class ChatAgent(BaseAgent):
  self,
  openai_messages: List[OpenAIMessage],
  num_tokens: int,
+ current_iteration: int = 0,
  response_format: Optional[Type[BaseModel]] = None,
  tool_schemas: Optional[List[Dict[str, Any]]] = None,
+ prev_num_openai_messages: int = 0,
  ) -> ModelResponse:
- r"""Internal function for agent step model response."""
+ r"""Internal function for agent async step model response.
+ Args:
+ openai_messages (List[OpenAIMessage]): The OpenAI messages
+ to process.
+ num_tokens (int): The number of tokens in the context.
+ current_iteration (int): The current iteration of the step.
+ response_format (Optional[Type[BaseModel]]): The response
+ format to use.
+ tool_schemas (Optional[List[Dict[str, Any]]]): The tool schemas
+ to use.
+ prev_num_openai_messages (int): The number of openai messages
+ logged in the previous iteration.
+
+ Returns:
+ ModelResponse: The model response.
+ """
 
  response = None
  try:
@@ -2024,11 +2085,12 @@ class ChatAgent(BaseAgent):
  )
 
  sanitized_messages = self._sanitize_messages_for_logging(
- openai_messages
+ openai_messages, prev_num_openai_messages
  )
  logger.info(
  f"Model {self.model_backend.model_type}, "
  f"index {self.model_backend.current_model_index}, "
+ f"iteration {current_iteration}, "
  f"processed these messages: {sanitized_messages}"
  )
  if not isinstance(response, ChatCompletion):
@@ -2038,12 +2100,16 @@ class ChatAgent(BaseAgent):
  )
  return self._handle_batch_response(response)
 
- def _sanitize_messages_for_logging(self, messages):
+ def _sanitize_messages_for_logging(
+ self, messages, prev_num_openai_messages: int
+ ):
  r"""Sanitize OpenAI messages for logging by replacing base64 image
  data with a simple message and a link to view the image.
 
  Args:
  messages (List[OpenAIMessage]): The OpenAI messages to sanitize.
+ prev_num_openai_messages (int): The number of openai messages
+ logged in the previous iteration.
 
  Returns:
  List[OpenAIMessage]: The sanitized OpenAI messages.
@@ -2056,7 +2122,7 @@ class ChatAgent(BaseAgent):
  # Create a copy of messages for logging to avoid modifying the
  # original messages
  sanitized_messages = []
- for msg in messages:
+ for msg in messages[prev_num_openai_messages:]:
  if isinstance(msg, dict):
  sanitized_msg = msg.copy()
  # Check if content is a list (multimodal content with images)
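
The effect of the new `prev_num_openai_messages` parameter is that only messages appended since the previous iteration are sanitized and logged, instead of the whole history every time. A self-contained sketch of that slicing behaviour on plain dict messages; the placeholder text for stripped image data is illustrative, not camel's exact string.

    from typing import Any, Dict, List

    def sanitize_for_logging(
        messages: List[Dict[str, Any]], prev_num_openai_messages: int
    ) -> List[Dict[str, Any]]:
        sanitized = []
        # Skip messages that were already logged in earlier iterations.
        for msg in messages[prev_num_openai_messages:]:
            if isinstance(msg.get("content"), list):
                cleaned = msg.copy()
                cleaned["content"] = [
                    {"type": "image_url", "image_url": "<base64 image omitted>"}
                    if isinstance(part, dict) and part.get("type") == "image_url"
                    else part
                    for part in msg["content"]
                ]
                sanitized.append(cleaned)
            else:
                sanitized.append(msg)
        return sanitized

    history = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": [
            {"type": "text", "text": "Describe this image."},
            {"type": "image_url", "image_url": "data:image/png;base64,iVBOR..."},
        ]},
    ]
    # With prev_num_openai_messages=1 only the newly added user message is logged.
    print(sanitize_for_logging(history, prev_num_openai_messages=1))
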
@@ -2339,6 +2405,7 @@ class ChatAgent(BaseAgent):
  info=info,
  )
 
+ @observe()
  def _execute_tool(
  self,
  tool_call_request: ToolCallRequest,
@@ -2373,7 +2440,7 @@ class ChatAgent(BaseAgent):
  error_msg = f"Error executing tool '{func_name}': {e!s}"
  result = f"Tool execution failed: {error_msg}"
  mask_flag = False
- logging.warning(error_msg)
+ logger.warning(f"{error_msg} with result: {result}")
 
  # Check if result is a ToolResult with images
  images_to_attach = None
@@ -2384,6 +2451,7 @@ class ChatAgent(BaseAgent):
  tool_record = self._record_tool_calling(
  func_name, args, result, tool_call_id, mask_output=mask_flag
  )
+ logger.info(f"Tool calling record:\n{tool_record}")
 
  # Store images for later attachment to next user message
  if images_to_attach:
@@ -2594,6 +2662,9 @@ class ChatAgent(BaseAgent):
  while True:
  # Check termination condition
  if self.stop_event and self.stop_event.is_set():
+ logger.info(
+ f"Termination triggered at iteration " f"{iteration_count}"
+ )
  yield self._step_terminate(
  num_tokens, tool_call_records, "termination_triggered"
  )
@@ -3252,6 +3323,9 @@ class ChatAgent(BaseAgent):
  while True:
  # Check termination condition
  if self.stop_event and self.stop_event.is_set():
+ logger.info(
+ f"Termination triggered at iteration " f"{iteration_count}"
+ )
  yield self._step_terminate(
  num_tokens, tool_call_records, "termination_triggered"
  )

camel/models/openai_model.py CHANGED
@@ -23,6 +23,7 @@ from openai.lib.streaming.chat import (
  from pydantic import BaseModel
 
  from camel.configs import OPENAI_API_PARAMS, ChatGPTConfig
+ from camel.logger import get_logger
  from camel.messages import OpenAIMessage
  from camel.models import BaseModelBackend
  from camel.types import (
@@ -39,6 +40,8 @@ from camel.utils import (
  update_langfuse_trace,
  )
 
+ logger = get_logger(__name__)
+
  if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
  try:
  from langfuse.decorators import observe
@@ -273,17 +276,23 @@ class OpenAIModel(BaseModelBackend):
 
  # Update Langfuse trace with current agent session and metadata
  agent_session_id = get_current_agent_session_id()
+ model_type_str = str(self.model_type)
+ if not agent_session_id:
+ agent_session_id = "no-session-id"
+ metadata = {
+ "source": "camel",
+ "agent_id": agent_session_id,
+ "agent_type": "camel_chat_agent",
+ "model_type": model_type_str,
+ }
+ metadata = {k: str(v) for k, v in metadata.items()}
  if agent_session_id:
  update_langfuse_trace(
  session_id=agent_session_id,
- metadata={
- "source": "camel",
- "agent_id": agent_session_id,
- "agent_type": "camel_chat_agent",
- "model_type": str(self.model_type),
- },
- tags=["CAMEL-AI", str(self.model_type)],
+ metadata=metadata,
+ tags=["CAMEL-AI", model_type_str],
  )
+ logger.info(f"metadata: {metadata}")
 
  messages = self._adapt_messages_for_o1_models(messages)
  response_format = response_format or self.model_config_dict.get(
@@ -342,17 +351,22 @@
 
  # Update Langfuse trace with current agent session and metadata
  agent_session_id = get_current_agent_session_id()
- if agent_session_id:
+ model_type_str = str(self.model_type)
+ if not agent_session_id:
+ agent_session_id = "no-session-id"
+ metadata = {
+ "source": "camel",
+ "agent_id": agent_session_id,
+ "agent_type": "camel_chat_agent",
+ "model_type": model_type_str,
+ }
+ metadata = {k: str(v) for k, v in metadata.items()}
  update_langfuse_trace(
  session_id=agent_session_id,
- metadata={
- "source": "camel",
- "agent_id": agent_session_id,
- "agent_type": "camel_chat_agent",
- "model_type": str(self.model_type),
- },
- tags=["CAMEL-AI", str(self.model_type)],
+ metadata=metadata,
+ tags=["CAMEL-AI", model_type_str],
  )
+ logger.info(f"metadata: {metadata}")
 
  messages = self._adapt_messages_for_o1_models(messages)
  response_format = response_format or self.model_config_dict.get(
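
Both hunks replace the inline metadata dict with a pre-built, string-coerced one and a "no-session-id" fallback, so the Langfuse trace update no longer depends on an agent session id being present. A small sketch of that construction as a standalone helper; `build_trace_metadata` is a hypothetical name used here for illustration, while camel builds the dict inline.

    from typing import Dict, Optional

    def build_trace_metadata(
        agent_session_id: Optional[str], model_type: object
    ) -> Dict[str, str]:
        # Fall back to a placeholder so traces are always attributable.
        session_id = agent_session_id or "no-session-id"
        metadata = {
            "source": "camel",
            "agent_id": session_id,
            "agent_type": "camel_chat_agent",
            "model_type": str(model_type),
        }
        # Coerce every value to str, mirroring the diff's defensive cast.
        return {k: str(v) for k, v in metadata.items()}

    print(build_trace_metadata(None, "gpt-4o-mini"))
    # {'source': 'camel', 'agent_id': 'no-session-id', 'agent_type': 'camel_chat_agent', 'model_type': 'gpt-4o-mini'}
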

camel/toolkits/__init__.py CHANGED
@@ -74,6 +74,7 @@ from .jina_reranker_toolkit import JinaRerankerToolkit
  from .pulse_mcp_search_toolkit import PulseMCPSearchToolkit
  from .klavis_toolkit import KlavisToolkit
  from .aci_toolkit import ACIToolkit
+ from .origene_mcp_toolkit import OrigeneToolkit
  from .playwright_mcp_toolkit import PlaywrightMCPToolkit
  from .wolfram_alpha_toolkit import WolframAlphaToolkit
  from .task_planning_toolkit import TaskPlanningToolkit
@@ -141,6 +142,7 @@ __all__ = [
  'OpenAIAgentToolkit',
  'SearxNGToolkit',
  'JinaRerankerToolkit',
+ 'OrigeneToolkit',
  'PulseMCPSearchToolkit',
  'KlavisToolkit',
  'ACIToolkit',
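
The new OrigeneToolkit is re-exported from the package namespace, so it can be imported directly from camel.toolkits; the module name (origene_mcp_toolkit) suggests an MCP-backed toolkit. A minimal import check, making no assumptions about its constructor:

    # Constructor arguments are not part of this diff, so only the import is shown.
    from camel.toolkits import OrigeneToolkit

    print(OrigeneToolkit.__name__)  # "OrigeneToolkit"
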