camel-ai 0.2.69a4__py3-none-any.whl → 0.2.69a7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic.

camel/__init__.py CHANGED
@@ -14,7 +14,7 @@
 
 from camel.logger import disable_logging, enable_logging, set_log_level
 
-__version__ = '0.2.69a4'
+__version__ = '0.2.69a7'
 
 __all__ = [
     '__version__',
@@ -406,7 +406,10 @@ class ChatAgent(BaseAgent):
             # List of tuples (platform, type)
             resolved_models_list = []
             for model_spec in model_list:
-                platform, type_ = model_spec[0], model_spec[1]  # type: ignore[index]
+                platform, type_ = (  # type: ignore[index]
+                    model_spec[0],
+                    model_spec[1],
+                )
                 resolved_models_list.append(
                     ModelFactory.create(
                         model_platform=platform, model_type=type_
@@ -846,6 +849,185 @@ class ChatAgent(BaseAgent):
         except ValidationError:
             return False
 
+    def _check_tools_strict_compatibility(self) -> bool:
+        r"""Check if all tools are compatible with OpenAI strict mode.
+
+        Returns:
+            bool: True if all tools are strict mode compatible,
+                False otherwise.
+        """
+        tool_schemas = self._get_full_tool_schemas()
+        for schema in tool_schemas:
+            if not schema.get("function", {}).get("strict", True):
+                return False
+        return True
+
+    def _convert_response_format_to_prompt(
+        self, response_format: Type[BaseModel]
+    ) -> str:
+        r"""Convert a Pydantic response format to a prompt instruction.
+
+        Args:
+            response_format (Type[BaseModel]): The Pydantic model class.
+
+        Returns:
+            str: A prompt instruction requesting the specific format.
+        """
+        try:
+            # Get the JSON schema from the Pydantic model
+            schema = response_format.model_json_schema()
+
+            # Create a prompt based on the schema
+            format_instruction = (
+                "\n\nPlease respond in the following JSON format:\n" "{\n"
+            )
+
+            properties = schema.get("properties", {})
+            for field_name, field_info in properties.items():
+                field_type = field_info.get("type", "string")
+                description = field_info.get("description", "")
+
+                if field_type == "array":
+                    format_instruction += (
+                        f' "{field_name}": ["array of values"]'
+                    )
+                elif field_type == "object":
+                    format_instruction += f' "{field_name}": {{"object"}}'
+                elif field_type == "boolean":
+                    format_instruction += f' "{field_name}": true'
+                elif field_type == "number":
+                    format_instruction += f' "{field_name}": 0'
+                else:
+                    format_instruction += f' "{field_name}": "string value"'
+
+                if description:
+                    format_instruction += f' // {description}'
+
+                # Add comma if not the last item
+                if field_name != list(properties.keys())[-1]:
+                    format_instruction += ","
+                format_instruction += "\n"
+
+            format_instruction += "}"
+            return format_instruction
+
+        except Exception as e:
+            logger.warning(
+                f"Failed to convert response_format to prompt: {e}. "
+                f"Using generic format instruction."
+            )
+            return (
+                "\n\nPlease respond in a structured JSON format "
+                "that matches the requested schema."
+            )
+
+    def _handle_response_format_with_non_strict_tools(
+        self,
+        input_message: Union[BaseMessage, str],
+        response_format: Optional[Type[BaseModel]] = None,
+    ) -> Tuple[Union[BaseMessage, str], Optional[Type[BaseModel]], bool]:
+        r"""Handle response format when tools are not strict mode compatible.
+
+        Args:
+            input_message: The original input message.
+            response_format: The requested response format.
+
+        Returns:
+            Tuple: (modified_message, modified_response_format,
+                used_prompt_formatting)
+        """
+        if response_format is None:
+            return input_message, response_format, False
+
+        # Check if tools are strict mode compatible
+        if self._check_tools_strict_compatibility():
+            return input_message, response_format, False
+
+        # Tools are not strict compatible, convert to prompt
+        logger.info(
+            "Non-strict tools detected. Converting response_format to "
+            "prompt-based formatting."
+        )
+
+        format_prompt = self._convert_response_format_to_prompt(
+            response_format
+        )
+
+        # Modify the message to include format instruction
+        modified_message: Union[BaseMessage, str]
+        if isinstance(input_message, str):
+            modified_message = input_message + format_prompt
+        else:
+            modified_message = input_message.create_new_instance(
+                input_message.content + format_prompt
+            )
+
+        # Return None for response_format to avoid strict mode conflicts
+        # and True to indicate we used prompt formatting
+        return modified_message, None, True
+
+    def _apply_prompt_based_parsing(
+        self,
+        response: ModelResponse,
+        original_response_format: Type[BaseModel],
+    ) -> None:
+        r"""Apply manual parsing when using prompt-based formatting.
+
+        Args:
+            response: The model response to parse.
+            original_response_format: The original response format class.
+        """
+        for message in response.output_messages:
+            if message.content:
+                try:
+                    # Try to extract JSON from the response content
+                    import json
+                    import re
+
+                    from pydantic import ValidationError
+
+                    # Try to find JSON in the content
+                    content = message.content.strip()
+
+                    # Try direct parsing first
+                    try:
+                        parsed_json = json.loads(content)
+                        message.parsed = (
+                            original_response_format.model_validate(
+                                parsed_json
+                            )
+                        )
+                        continue
+                    except (json.JSONDecodeError, ValidationError):
+                        pass
+
+                    # Try to extract JSON from text
+                    json_pattern = r'\{[^{}]*(?:\{[^{}]*\}[^{}]*)*\}'
+                    json_matches = re.findall(json_pattern, content, re.DOTALL)
+
+                    for json_str in json_matches:
+                        try:
+                            parsed_json = json.loads(json_str)
+                            message.parsed = (
+                                original_response_format.model_validate(
+                                    parsed_json
+                                )
+                            )
+                            # Update content to just the JSON for consistency
+                            message.content = json.dumps(parsed_json)
+                            break
+                        except (json.JSONDecodeError, ValidationError):
+                            continue
+
+                    if not message.parsed:
+                        logger.warning(
+                            f"Failed to parse JSON from response: "
+                            f"{content[:100]}..."
+                        )
+
+                except Exception as e:
+                    logger.warning(f"Error during prompt-based parsing: {e}")
+
     def _format_response_if_needed(
         self,
         response: ModelResponse,
@@ -932,6 +1114,14 @@ class ChatAgent(BaseAgent):
         except ImportError:
             pass  # Langfuse not available
 
+        # Handle response format compatibility with non-strict tools
+        original_response_format = response_format
+        input_message, response_format, used_prompt_formatting = (
+            self._handle_response_format_with_non_strict_tools(
+                input_message, response_format
+            )
+        )
+
         # Convert input message to BaseMessage if necessary
         if isinstance(input_message, str):
             input_message = BaseMessage.make_user_message(
@@ -1014,6 +1204,13 @@ class ChatAgent(BaseAgent):
                 break
 
         self._format_response_if_needed(response, response_format)
+
+        # Apply manual parsing if we used prompt-based formatting
+        if used_prompt_formatting and original_response_format:
+            self._apply_prompt_based_parsing(
+                response, original_response_format
+            )
+
         self._record_final_output(response.output_messages)
 
         return self._convert_to_chatagent_response(
@@ -1065,6 +1262,14 @@ class ChatAgent(BaseAgent):
         except ImportError:
             pass  # Langfuse not available
 
+        # Handle response format compatibility with non-strict tools
+        original_response_format = response_format
+        input_message, response_format, used_prompt_formatting = (
+            self._handle_response_format_with_non_strict_tools(
+                input_message, response_format
+            )
+        )
+
         if isinstance(input_message, str):
             input_message = BaseMessage.make_user_message(
                 role_name="User", content=input_message
@@ -1098,6 +1303,11 @@ class ChatAgent(BaseAgent):
             )
            iteration_count += 1
 
+            # Accumulate API token usage
+            self._update_token_usage_tracker(
+                step_token_usage, response.usage_dict
+            )
+
            # Terminate Agent if stop_event is set
            if self.stop_event and self.stop_event.is_set():
                # Use the _step_terminate to terminate the agent with reason
@@ -1139,13 +1349,14 @@ class ChatAgent(BaseAgent):
                 break
 
         await self._aformat_response_if_needed(response, response_format)
-        self._record_final_output(response.output_messages)
 
-        # Create token usage tracker for this step
-        step_token_usage = self._create_token_usage_tracker()
+        # Apply manual parsing if we used prompt-based formatting
+        if used_prompt_formatting and original_response_format:
+            self._apply_prompt_based_parsing(
+                response, original_response_format
+            )
 
-        # Update with response usage
-        self._update_token_usage_tracker(step_token_usage, response.usage_dict)
+        self._record_final_output(response.output_messages)
 
         return self._convert_to_chatagent_response(
             response,
@@ -1924,7 +2135,9 @@ class ChatAgent(BaseAgent):
                 schema for schema in self._external_tool_schemas.values()
             ],
             response_terminators=self.response_terminators,
-            scheduling_strategy=self.model_backend.scheduling_strategy.__name__,
+            scheduling_strategy=(
+                self.model_backend.scheduling_strategy.__name__
+            ),
             max_iteration=self.max_iteration,
             stop_event=self.stop_event,
         )
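
The ChatAgent methods added above fall back to prompt-based structured output whenever any registered tool schema is not OpenAI strict-mode compatible. A minimal usage sketch, assuming the public `ChatAgent`/`FunctionTool` API and a hypothetical `WeatherReport` model and `lookup_temperature` tool (neither is part of this diff):

```python
from pydantic import BaseModel, Field

from camel.agents import ChatAgent
from camel.toolkits import FunctionTool


class WeatherReport(BaseModel):  # hypothetical response format
    city: str = Field(description="City the report refers to")
    temperature_c: float = Field(description="Temperature in Celsius")


def lookup_temperature(city: str) -> float:
    """Hypothetical tool whose generated schema may not be strict-compatible."""
    return 21.5


agent = ChatAgent(
    "You are a weather assistant.",
    tools=[FunctionTool(lookup_temperature)],
)

# With a non-strict tool present, the agent appends a
# "Please respond in the following JSON format: {...}" instruction to the
# user message instead of sending response_format to the API, then parses
# the JSON reply back into message.parsed.
response = agent.step(
    "What is the weather in Oslo?", response_format=WeatherReport
)
print(response.msgs[0].parsed)
```

For a model like the one above, `_convert_response_format_to_prompt` emits string and number placeholders per field, with any field descriptions appended as `//` comments.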
@@ -79,6 +79,9 @@ class InternalPythonInterpreter(BaseInterpreter):
            (default: :obj:`False`)
        raise_error (bool, optional): Raise error if the interpreter fails.
            (default: :obj:`False`)
+        allow_builtins (bool, optional): If `True`, safe built-in functions
+            like print, len, str, etc. are added to the action space.
+            (default: :obj:`True`)
     """
 
     _CODE_TYPES: ClassVar[List[str]] = ["python", "py", "python3", "python2"]
@@ -89,16 +92,62 @@
         import_white_list: Optional[List[str]] = None,
         unsafe_mode: bool = False,
         raise_error: bool = False,
+        allow_builtins: bool = True,
     ) -> None:
         self.action_space = action_space or dict()
-        # Add print to action space
-        self.action_space['print'] = print
         self.state = self.action_space.copy()
         self.fuzz_state: Dict[str, Any] = dict()
         self.import_white_list = import_white_list or list()
         self.raise_error = raise_error
         self.unsafe_mode = unsafe_mode
 
+        # Add safe built-in functions if allowed
+        if allow_builtins:
+            self._add_safe_builtins()
+
+    def _add_safe_builtins(self):
+        r"""Add safe built-in functions to the action space."""
+        safe_builtins = {
+            'print': print,
+            'len': len,
+            'str': str,
+            'int': int,
+            'float': float,
+            'bool': bool,
+            'list': list,
+            'dict': dict,
+            'tuple': tuple,
+            'set': set,
+            'abs': abs,
+            'min': min,
+            'max': max,
+            'sum': sum,
+            'sorted': sorted,
+            'reversed': reversed,
+            'enumerate': enumerate,
+            'zip': zip,
+            'range': range,
+            'round': round,
+            'type': type,
+            'isinstance': isinstance,
+            'hasattr': hasattr,
+            'getattr': getattr,
+            'setattr': setattr,
+            'dir': dir,
+            'help': help,
+            'map': map,
+            'filter': filter,
+            'any': any,
+            'all': all,
+            'ord': ord,
+            'chr': chr,
+            'bin': bin,
+            'oct': oct,
+            'hex': hex,
+        }
+        self.action_space.update(safe_builtins)
+        self.state.update(safe_builtins)
+
     def run(self, code: str, code_type: str = "python") -> str:
         r"""Executes the given code with specified code type in the
         interpreter.
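
With the new `allow_builtins` flag (default `True`), a freshly constructed `InternalPythonInterpreter` can evaluate code that uses common built-ins without any custom `action_space`. A rough sketch, assuming only the constructor and `run()` shown in this diff:

```python
from camel.interpreters import InternalPythonInterpreter

# Built-ins such as sorted, len and print are pre-loaded into the action space.
interpreter = InternalPythonInterpreter()
print(interpreter.run("sorted([3, 1, 2])", code_type="python"))

# Opting out approximates the previous behaviour, where only explicitly
# registered callables were available (print used to be added automatically).
bare = InternalPythonInterpreter(
    allow_builtins=False, action_space={"print": print}
)
```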
@@ -155,16 +155,21 @@ class ScoreBasedContextCreator(BaseContextCreator):
         # ======================
         # 6. Truncation Logic with Tool Call Awareness
         # ======================
-        logger.warning(
-            f"Context truncation required "
-            f"({total_tokens} > {self.token_limit}), "
-            f"pruning low-score messages."
-        )
-
         remaining_units = self._truncate_with_tool_call_awareness(
             regular_units, tool_call_groups, system_tokens
         )
 
+        # Log only after truncation is actually performed so that both
+        # the original and the final token counts are visible.
+        tokens_after = system_tokens + sum(
+            u.num_tokens for u in remaining_units
+        )
+        logger.warning(
+            "Context truncation performed: "
+            f"before={total_tokens}, after={tokens_after}, "
+            f"limit={self.token_limit}"
+        )
+
         # ======================
         # 7. Output Assembly
         # ======================
camel/messages/base.py CHANGED
@@ -69,7 +69,7 @@ class BaseMessage:
         image_detail (Literal["auto", "low", "high"]): Detail level of the
             images associated with the message. (default: :obj:`auto`)
         video_detail (Literal["auto", "low", "high"]): Detail level of the
-            videos associated with the message. (default: :obj:`low`)
+            videos associated with the message. (default: :obj:`auto`)
         parsed: Optional[Union[Type[BaseModel], dict]]: Optional object which
             is parsed from the content. (default: :obj:`None`)
     """
@@ -82,7 +82,7 @@ class BaseMessage:
     video_bytes: Optional[bytes] = None
     image_list: Optional[List[Image.Image]] = None
     image_detail: Literal["auto", "low", "high"] = "auto"
-    video_detail: Literal["auto", "low", "high"] = "low"
+    video_detail: Literal["auto", "low", "high"] = "auto"
     parsed: Optional[Union[BaseModel, dict]] = None
 
     @classmethod
@@ -12,6 +12,7 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import abc
+import os
 import re
 from abc import ABC, abstractmethod
 from typing import Any, Dict, List, Optional, Type, Union
@@ -94,6 +95,12 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
         self._token_counter = token_counter
         self._timeout = timeout
         self._max_retries = max_retries
+        # Initialize logging configuration
+        self._log_enabled = (
+            os.environ.get("CAMEL_MODEL_LOG_ENABLED", "False").lower()
+            == "true"
+        )
+        self._log_dir = os.environ.get("CAMEL_LOG_DIR", "camel_logs")
         self.check_model_config()
 
     @property
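
The request/response logging added to `BaseModelBackend` is switched on purely through environment variables read in `__init__`; nothing else in the public API changes. A hedged sketch of enabling it (the model platform and type below are chosen only for illustration):

```python
import os

from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

# Must be set before the backend is constructed; any casing of "true" enables it.
os.environ["CAMEL_MODEL_LOG_ENABLED"] = "true"
os.environ["CAMEL_LOG_DIR"] = "./camel_logs"  # optional, defaults to "camel_logs"

model = ModelFactory.create(
    model_platform=ModelPlatformType.OPENAI,
    model_type=ModelType.GPT_4O_MINI,
)

# Each run()/arun() call on this backend now writes a conv_<timestamp>.json
# file containing the request messages and, once available, the response.
```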
@@ -232,6 +239,68 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
 
         return formatted_messages
 
+    def _log_request(self, messages: List[OpenAIMessage]) -> Optional[str]:
+        r"""Log the request messages to a JSON file if logging is enabled.
+
+        Args:
+            messages (List[OpenAIMessage]): The messages to log.
+
+        Returns:
+            Optional[str]: The path to the log file if logging is enabled,
+                None otherwise.
+        """
+        if not self._log_enabled:
+            return None
+
+        import json
+        from datetime import datetime
+
+        os.makedirs(self._log_dir, exist_ok=True)
+
+        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S_%f')
+        log_file_path = os.path.join(self._log_dir, f"conv_{timestamp}.json")
+
+        log_entry = {
+            "request_timestamp": datetime.now().isoformat(),
+            "model": str(self.model_type),
+            "request": {"messages": messages},
+        }
+
+        with open(log_file_path, "w") as f:
+            json.dump(log_entry, f, indent=4)
+
+        return log_file_path
+
+    def _log_response(self, log_path: str, response: Any) -> None:
+        r"""Log the response to the existing log file.
+
+        Args:
+            log_path (str): The path to the log file.
+            response (Any): The response to log.
+        """
+        if not self._log_enabled or not log_path:
+            return
+
+        import json
+        from datetime import datetime
+
+        with open(log_path, "r+") as f:
+            log_data = json.load(f)
+
+            log_data["response_timestamp"] = datetime.now().isoformat()
+            if isinstance(response, BaseModel):
+                log_data["response"] = response.model_dump()
+            else:
+                try:
+                    json.dumps(response)
+                    log_data["response"] = response
+                except TypeError:
+                    log_data["response"] = str(response)
+
+            f.seek(0)
+            json.dump(log_data, f, indent=4)
+            f.truncate()
+
     @abstractmethod
     def _run(
         self,
@@ -273,13 +342,23 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
                `ChatCompletion` in the non-stream mode, or
                `Stream[ChatCompletionChunk]` in the stream mode.
        """
+        # Log the request if logging is enabled
+        log_path = self._log_request(messages)
+
         # None -> use default tools
         if tools is None:
             tools = self.model_config_dict.get("tools", None)
         # Empty -> use no tools
         elif not tools:
             tools = None
-        return self._run(messages, response_format, tools)
+
+        result = self._run(messages, response_format, tools)
+
+        # Log the response if logging is enabled
+        if log_path:
+            self._log_response(log_path, result)
+
+        return result
 
     async def arun(
         self,
@@ -304,11 +383,21 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
                `ChatCompletion` in the non-stream mode, or
                `AsyncStream[ChatCompletionChunk]` in the stream mode.
        """
+        # Log the request if logging is enabled
+        log_path = self._log_request(messages)
+
         if tools is None:
             tools = self.model_config_dict.get("tools", None)
         elif not tools:
             tools = None
-        return await self._arun(messages, response_format, tools)
+
+        result = await self._arun(messages, response_format, tools)
+
+        # Log the response if logging is enabled
+        if log_path:
+            self._log_response(log_path, result)
+
+        return result
 
     @abstractmethod
     def check_model_config(self):