camel-ai 0.2.10__py3-none-any.whl → 0.2.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic. Click here for more details.

Files changed (76)
  1. camel/__init__.py +6 -1
  2. camel/agents/chat_agent.py +87 -6
  3. camel/agents/deductive_reasoner_agent.py +4 -1
  4. camel/benchmarks/__init__.py +18 -0
  5. camel/benchmarks/base.py +152 -0
  6. camel/benchmarks/gaia.py +478 -0
  7. camel/configs/__init__.py +6 -0
  8. camel/configs/mistral_config.py +0 -3
  9. camel/configs/nvidia_config.py +70 -0
  10. camel/configs/ollama_config.py +4 -2
  11. camel/configs/sglang_config.py +71 -0
  12. camel/configs/vllm_config.py +10 -1
  13. camel/data_collector/__init__.py +19 -0
  14. camel/data_collector/alpaca_collector.py +127 -0
  15. camel/data_collector/base.py +211 -0
  16. camel/data_collector/sharegpt_collector.py +205 -0
  17. camel/datahubs/__init__.py +23 -0
  18. camel/datahubs/base.py +136 -0
  19. camel/datahubs/huggingface.py +433 -0
  20. camel/datahubs/models.py +22 -0
  21. camel/embeddings/vlm_embedding.py +4 -1
  22. camel/interpreters/__init__.py +2 -0
  23. camel/interpreters/docker_interpreter.py +7 -2
  24. camel/interpreters/e2b_interpreter.py +136 -0
  25. camel/interpreters/subprocess_interpreter.py +7 -2
  26. camel/loaders/__init__.py +3 -1
  27. camel/loaders/base_io.py +41 -41
  28. camel/loaders/firecrawl_reader.py +0 -3
  29. camel/logger.py +112 -0
  30. camel/messages/__init__.py +3 -1
  31. camel/messages/base.py +10 -7
  32. camel/messages/conversion/__init__.py +3 -1
  33. camel/messages/conversion/alpaca.py +122 -0
  34. camel/models/__init__.py +7 -0
  35. camel/models/anthropic_model.py +14 -4
  36. camel/models/base_model.py +28 -0
  37. camel/models/groq_model.py +1 -1
  38. camel/models/model_factory.py +6 -0
  39. camel/models/model_manager.py +212 -0
  40. camel/models/nvidia_model.py +141 -0
  41. camel/models/ollama_model.py +12 -0
  42. camel/models/openai_model.py +0 -25
  43. camel/models/reward/__init__.py +22 -0
  44. camel/models/reward/base_reward_model.py +58 -0
  45. camel/models/reward/evaluator.py +63 -0
  46. camel/models/reward/nemotron_model.py +112 -0
  47. camel/models/sglang_model.py +225 -0
  48. camel/models/vllm_model.py +1 -1
  49. camel/personas/persona_hub.py +2 -2
  50. camel/retrievers/vector_retriever.py +22 -5
  51. camel/schemas/openai_converter.py +2 -2
  52. camel/societies/babyagi_playing.py +4 -1
  53. camel/societies/workforce/role_playing_worker.py +2 -2
  54. camel/societies/workforce/single_agent_worker.py +2 -2
  55. camel/societies/workforce/workforce.py +3 -3
  56. camel/storages/object_storages/amazon_s3.py +2 -2
  57. camel/storages/object_storages/azure_blob.py +2 -2
  58. camel/storages/object_storages/google_cloud.py +2 -2
  59. camel/toolkits/__init__.py +5 -0
  60. camel/toolkits/code_execution.py +42 -4
  61. camel/toolkits/function_tool.py +41 -0
  62. camel/toolkits/human_toolkit.py +1 -0
  63. camel/toolkits/math_toolkit.py +47 -16
  64. camel/toolkits/meshy_toolkit.py +185 -0
  65. camel/toolkits/search_toolkit.py +154 -2
  66. camel/toolkits/stripe_toolkit.py +273 -0
  67. camel/toolkits/twitter_toolkit.py +3 -0
  68. camel/types/__init__.py +2 -0
  69. camel/types/enums.py +68 -10
  70. camel/utils/commons.py +22 -5
  71. camel/utils/token_counting.py +26 -11
  72. {camel_ai-0.2.10.dist-info → camel_ai-0.2.12.dist-info}/METADATA +13 -6
  73. {camel_ai-0.2.10.dist-info → camel_ai-0.2.12.dist-info}/RECORD +76 -51
  74. /camel/messages/conversion/{models.py → conversation_models.py} +0 -0
  75. {camel_ai-0.2.10.dist-info → camel_ai-0.2.12.dist-info}/LICENSE +0 -0
  76. {camel_ai-0.2.10.dist-info → camel_ai-0.2.12.dist-info}/WHEEL +0 -0
camel/__init__.py CHANGED
@@ -12,9 +12,14 @@
12
12
  # limitations under the License.
13
13
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
14
 
15
- __version__ = '0.2.10'
15
+ from camel.logger import disable_logging, enable_logging, set_log_level
16
+
17
+ __version__ = '0.2.12'
16
18
 
17
19
  __all__ = [
18
20
  '__version__',
19
21
  'camel',
22
+ 'disable_logging',
23
+ 'enable_logging',
24
+ 'set_log_level',
20
25
  ]
@@ -21,6 +21,7 @@ from collections import defaultdict
21
21
  from typing import (
22
22
  TYPE_CHECKING,
23
23
  Any,
24
+ Callable,
24
25
  Dict,
25
26
  List,
26
27
  Optional,
@@ -41,7 +42,12 @@ from camel.memories import (
41
42
  ScoreBasedContextCreator,
42
43
  )
43
44
  from camel.messages import BaseMessage, FunctionCallingMessage, OpenAIMessage
44
- from camel.models import BaseModelBackend, ModelFactory
45
+ from camel.models import (
46
+ BaseModelBackend,
47
+ ModelFactory,
48
+ ModelManager,
49
+ ModelProcessingError,
50
+ )
45
51
  from camel.responses import ChatAgentResponse
46
52
  from camel.types import (
47
53
  ChatCompletion,
@@ -145,12 +151,16 @@ class ChatAgent(BaseAgent):
145
151
  response_terminators (List[ResponseTerminator], optional): List of
146
152
  :obj:`ResponseTerminator` bind to one chat agent.
147
153
  (default: :obj:`None`)
154
+ scheduling_strategy (str): name of function that defines how to select
155
+ the next model in ModelManager. (default: :str:`round_robin`)
148
156
  """
149
157
 
150
158
  def __init__(
151
159
  self,
152
160
  system_message: Optional[Union[BaseMessage, str]] = None,
153
- model: Optional[BaseModelBackend] = None,
161
+ model: Optional[
162
+ Union[BaseModelBackend, List[BaseModelBackend]]
163
+ ] = None,
154
164
  memory: Optional[AgentMemory] = None,
155
165
  message_window_size: Optional[int] = None,
156
166
  token_limit: Optional[int] = None,
@@ -158,6 +168,7 @@ class ChatAgent(BaseAgent):
158
168
  tools: Optional[List[FunctionTool]] = None,
159
169
  external_tools: Optional[List[FunctionTool]] = None,
160
170
  response_terminators: Optional[List[ResponseTerminator]] = None,
171
+ scheduling_strategy: str = "round_robin",
161
172
  ) -> None:
162
173
  if isinstance(system_message, str):
163
174
  system_message = BaseMessage.make_assistant_message(
@@ -172,13 +183,14 @@ class ChatAgent(BaseAgent):
172
183
  self.role_type: RoleType = (
173
184
  getattr(system_message, 'role_type', None) or RoleType.ASSISTANT
174
185
  )
175
- self.model_backend: BaseModelBackend = (
186
+ self.model_backend = ModelManager(
176
187
  model
177
188
  if model is not None
178
189
  else ModelFactory.create(
179
190
  model_platform=ModelPlatformType.DEFAULT,
180
191
  model_type=ModelType.DEFAULT,
181
- )
192
+ ),
193
+ scheduling_strategy=scheduling_strategy,
182
194
  )
183
195
 
184
196
  self.model_type = self.model_backend.model_type
@@ -487,6 +499,13 @@ class ChatAgent(BaseAgent):
487
499
  "the model configuration and in the ChatAgent step."
488
500
  )
489
501
 
502
+ original_model_dict = self.model_backend.model_config_dict
503
+ if response_format and self.model_type in {"gpt-4o", "gpt-4o-mini"}:
504
+ self.model_backend.model_config_dict = original_model_dict.copy()
505
+ self.model_backend.model_config_dict["response_format"] = (
506
+ response_format
507
+ )
508
+
490
509
  if isinstance(input_message, str):
491
510
  input_message = BaseMessage.make_user_message(
492
511
  role_name='User', content=input_message
@@ -620,6 +639,7 @@ class ChatAgent(BaseAgent):
620
639
  try:
621
640
  openai_messages, num_tokens = self.memory.get_context()
622
641
  except RuntimeError as e:
642
+ self.model_backend.model_config_dict = original_model_dict
623
643
  return self._step_token_exceed(
624
644
  e.args[1], tool_call_records, "max_tokens_exceeded"
625
645
  )
@@ -655,6 +675,8 @@ class ChatAgent(BaseAgent):
655
675
  num_tokens,
656
676
  tool_call_request,
657
677
  )
678
+
679
+ self.model_backend.model_config_dict = original_model_dict
658
680
  return ChatAgentResponse(
659
681
  msgs=output_messages,
660
682
  terminated=self.terminated,
@@ -669,6 +691,7 @@ class ChatAgent(BaseAgent):
669
691
  if (
670
692
  response_format is not None
671
693
  and self.model_type.support_native_tool_calling
694
+ and self.model_type not in {"gpt-4o", "gpt-4o-mini"}
672
695
  ):
673
696
  (
674
697
  output_messages,
@@ -699,6 +722,7 @@ class ChatAgent(BaseAgent):
699
722
  "to record the selected message manually."
700
723
  )
701
724
 
725
+ self.model_backend.model_config_dict = original_model_dict
702
726
  return ChatAgentResponse(
703
727
  msgs=output_messages, terminated=self.terminated, info=info
704
728
  )
@@ -918,7 +942,7 @@ class ChatAgent(BaseAgent):
918
942
  )
919
943
 
920
944
  for base_message_item in output_messages:
921
- base_message_item.content = str(tool_call_record.result)
945
+ base_message_item.content = json.dumps(tool_call_record.result)
922
946
 
923
947
  # Recover the original tools
924
948
  self.func_dict = original_func_dict
@@ -945,8 +969,32 @@ class ChatAgent(BaseAgent):
945
969
  str,
946
970
  ]:
947
971
  r"""Internal function for agent step model response."""
972
+
973
+ response = None
948
974
  # Obtain the model's response
949
- response = self.model_backend.run(openai_messages)
975
+ for _ in range(len(self.model_backend.models)):
976
+ try:
977
+ response = self.model_backend.run(openai_messages)
978
+ break
979
+ except Exception as exc:
980
+ logger.error(
981
+ f"An error occurred while running model "
982
+ f"{self.model_backend.model_type}, "
983
+ f"index: {self.model_backend.current_model_index}",
984
+ exc_info=exc,
985
+ )
986
+ continue
987
+ if not response:
988
+ raise ModelProcessingError(
989
+ "Unable to process messages: none of the provided models "
990
+ "run succesfully."
991
+ )
992
+
993
+ logger.info(
994
+ f"Model {self.model_backend.model_type}, "
995
+ f"index {self.model_backend.current_model_index}, "
996
+ f"processed these messages: {openai_messages}"
997
+ )
950
998
 
951
999
  if isinstance(response, ChatCompletion):
952
1000
  output_messages, finish_reasons, usage_dict, response_id = (
@@ -1054,8 +1102,32 @@ class ChatAgent(BaseAgent):
1054
1102
  role_type=self.role_type,
1055
1103
  meta_dict=dict(),
1056
1104
  content=choice.message.content or "",
1105
+ parsed=getattr(choice.message, 'parsed', None),
1057
1106
  )
1107
+ # Process log probabilities and append to the message meta information
1108
+ if choice.logprobs is not None:
1109
+ tokens_logprobs = choice.logprobs.content
1110
+
1111
+ if tokens_logprobs is not None:
1112
+ # Extract and structure logprob information
1113
+ logprobs_info = [
1114
+ {
1115
+ "token": token_logprob.token,
1116
+ "logprob": token_logprob.logprob,
1117
+ "top_logprobs": [
1118
+ (top_logprob.token, top_logprob.logprob)
1119
+ for top_logprob in token_logprob.top_logprobs
1120
+ ],
1121
+ }
1122
+ for token_logprob in tokens_logprobs
1123
+ ]
1124
+ # Ensure meta_dict exists before adding logprobs info
1125
+ if chat_message.meta_dict is None:
1126
+ chat_message.meta_dict = {}
1127
+ chat_message.meta_dict["logprobs_info"] = logprobs_info
1128
+ # Append the processed chat message to output
1058
1129
  output_messages.append(chat_message)
1130
+
1059
1131
  finish_reasons = [
1060
1132
  str(choice.finish_reason) for choice in response.choices
1061
1133
  ]
@@ -1298,6 +1370,15 @@ class ChatAgent(BaseAgent):
1298
1370
  )
1299
1371
  return usage_dict
1300
1372
 
1373
+ def add_model_scheduling_strategy(self, name: str, strategy_fn: Callable):
1374
+ r"""Add a scheduling strategy method provided by user to ModelManger.
1375
+
1376
+ Args:
1377
+ name (str): The name of the strategy.
1378
+ strategy_fn (Callable): The scheduling strategy function.
1379
+ """
1380
+ self.model_backend.add_strategy(name, strategy_fn)
1381
+
1301
1382
  def __repr__(self) -> str:
1302
1383
  r"""Returns a string representation of the :obj:`ChatAgent`.
1303
1384
 
@@ -15,11 +15,14 @@ import re
15
15
  from typing import Dict, List, Optional, Union
16
16
 
17
17
  from camel.agents.chat_agent import ChatAgent
18
+ from camel.logger import get_logger
18
19
  from camel.messages import BaseMessage
19
20
  from camel.models import BaseModelBackend
20
21
  from camel.prompts import TextPrompt
21
22
  from camel.types import RoleType
22
23
 
24
+ logger = get_logger(__name__)
25
+
23
26
  # AgentOps decorator setting
24
27
  try:
25
28
  import os
@@ -253,7 +256,7 @@ square brackets)
253
256
  "Deduction failed. Error:\n" + f"{response.info}"
254
257
  )
255
258
  msg: BaseMessage = response.msg
256
- print(f"Message content:\n{msg.content}")
259
+ logger.info(f"Message content:\n{msg.content}")
257
260
 
258
261
  # Extract the conditions from the message
259
262
  conditions_dict = {
@@ -0,0 +1,18 @@
1
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
+
15
+ from .base import BaseBenchmark
16
+ from .gaia import DefaultGAIARetriever, GAIABenchmark
17
+
18
+ __all__ = ["BaseBenchmark", "GAIABenchmark", "DefaultGAIARetriever"]
@@ -0,0 +1,152 @@
1
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
+
15
+ import logging
16
+ from abc import ABC, abstractmethod
17
+ from pathlib import Path
18
+ from typing import Any, Dict, List, Literal, Optional
19
+
20
+ from camel.agents import ChatAgent
21
+
22
+ logger = logging.getLogger(__name__)
23
+
24
+
25
+ class BaseBenchmark(ABC):
26
+ r"""Base class for benchmarks.
27
+
28
+ Attributes:
29
+ name (str): Name of the benchmark.
30
+ data_dir (str): Path to the data directory.
31
+ save_to (str): Path to save the results.
32
+ processes (int): Number of processes to use for parallel
33
+ processing. :(default: :obj:`1`)
34
+ """
35
+
36
+ def __init__(
37
+ self, name: str, data_dir: str, save_to: str, processes: int = 1
38
+ ):
39
+ r"""Initialize the benchmark.
40
+
41
+ Args:
42
+ name (str): Name of the benchmark.
43
+ data_dir (str): Path to the data directory.
44
+ save_to (str): Path to save the results.
45
+ processes (int): Number of processes to use for parallel
46
+ processing. :(default: :obj:`1`)
47
+
48
+ """
49
+ self.name = name
50
+ self.data_dir = Path(data_dir)
51
+ self.processes = processes
52
+ self.save_to = save_to
53
+ if not self.data_dir.exists():
54
+ logger.info(
55
+ f"Data directory {data_dir} does not exist. Creating it."
56
+ )
57
+ self.data_dir.mkdir(parents=True, exist_ok=True)
58
+ if not self.data_dir.is_dir():
59
+ raise NotADirectoryError(
60
+ f"Data directory {data_dir} is not a directory"
61
+ )
62
+ self._data: Dict[str, List[Dict[str, Any]]] = dict()
63
+ self._results: List[Dict[str, Any]] = []
64
+
65
+ @abstractmethod
66
+ def download(self) -> "BaseBenchmark":
67
+ r"""Download the benchmark data.
68
+
69
+ Returns:
70
+ BaseBenchmark: The benchmark instance.
71
+ """
72
+ pass
73
+
74
+ @abstractmethod
75
+ def load(self, force_download: bool = False) -> "BaseBenchmark":
76
+ r"""Load the benchmark data.
77
+
78
+ Args:
79
+ force_download (bool): Whether to force download the data.
80
+
81
+ Returns:
82
+ BaseBenchmark: The benchmark instance.
83
+ """
84
+ pass
85
+
86
+ @property
87
+ def train(self) -> List[Dict[str, Any]]:
88
+ r"""Get the training data.
89
+
90
+ Returns:
91
+ List[Dict[str, Any]]: The training data.
92
+ """
93
+ if not self._data:
94
+ logger.info("Data not loaded. Loading data.")
95
+ self.load()
96
+ return self._data["train"]
97
+
98
+ @property
99
+ def valid(self) -> List[Dict[str, Any]]:
100
+ r"""Get the validation data.
101
+
102
+ Returns:
103
+ List[Dict[str, Any]]: The validation data.
104
+ """
105
+ if not self._data:
106
+ logger.info("Data not loaded. Loading data.")
107
+ self.load()
108
+ return self._data["valid"]
109
+
110
+ @property
111
+ def test(self) -> List[Dict[str, Any]]:
112
+ r"""Get the test data.
113
+
114
+ Returns:
115
+ List[Dict[str, Any]]: The test data.
116
+ """
117
+ if not self._data:
118
+ logger.info("Data not loaded. Loading data.")
119
+ self.load()
120
+ return self._data["test"]
121
+
122
+ @abstractmethod
123
+ def run(
124
+ self,
125
+ agent: ChatAgent,
126
+ on: Literal["train", "valid", "test"],
127
+ randomize: bool = False,
128
+ subset: Optional[int] = None,
129
+ *args,
130
+ **kwargs,
131
+ ) -> "BaseBenchmark":
132
+ r"""Run the benchmark.
133
+
134
+ Args:
135
+ agent (ChatAgent): The chat agent.
136
+ on (str): The data split to run the benchmark on.
137
+ randomize (bool): Whether to randomize the data.
138
+ subset (int): The subset of the data to run the benchmark on.
139
+
140
+ Returns:
141
+ BaseBenchmark: The benchmark instance.
142
+ """
143
+ pass
144
+
145
+ @property
146
+ def results(self) -> List[Dict[str, Any]]:
147
+ r"""Get the results.
148
+
149
+ Returns:
150
+ List[Dict[str, Any]]: The results.
151
+ """
152
+ return self._results