camel-ai 0.2.58__py3-none-any.whl → 0.2.60__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry.
Note: this release of camel-ai has been flagged as potentially problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +126 -9
- camel/agents/critic_agent.py +73 -8
- camel/benchmarks/__init__.py +2 -0
- camel/benchmarks/browsecomp.py +854 -0
- camel/configs/cohere_config.py +1 -1
- camel/configs/mistral_config.py +1 -1
- camel/configs/openai_config.py +3 -0
- camel/configs/reka_config.py +1 -1
- camel/configs/samba_config.py +2 -2
- camel/datagen/cot_datagen.py +29 -34
- camel/embeddings/jina_embedding.py +8 -1
- camel/embeddings/sentence_transformers_embeddings.py +2 -2
- camel/embeddings/vlm_embedding.py +9 -2
- camel/human.py +14 -0
- camel/memories/records.py +3 -0
- camel/messages/base.py +15 -3
- camel/models/azure_openai_model.py +1 -0
- camel/models/model_factory.py +2 -2
- camel/retrievers/bm25_retriever.py +1 -2
- camel/retrievers/hybrid_retrival.py +2 -2
- camel/societies/role_playing.py +50 -0
- camel/societies/workforce/role_playing_worker.py +17 -8
- camel/societies/workforce/workforce.py +70 -14
- camel/storages/vectordb_storages/oceanbase.py +1 -2
- camel/toolkits/async_browser_toolkit.py +5 -1
- camel/toolkits/base.py +4 -2
- camel/toolkits/browser_toolkit.py +6 -3
- camel/toolkits/dalle_toolkit.py +4 -0
- camel/toolkits/excel_toolkit.py +11 -3
- camel/toolkits/github_toolkit.py +43 -25
- camel/toolkits/image_analysis_toolkit.py +3 -0
- camel/toolkits/jina_reranker_toolkit.py +194 -77
- camel/toolkits/mcp_toolkit.py +60 -16
- camel/toolkits/page_script.js +40 -28
- camel/toolkits/twitter_toolkit.py +6 -1
- camel/toolkits/video_analysis_toolkit.py +3 -0
- camel/toolkits/video_download_toolkit.py +3 -0
- camel/toolkits/wolfram_alpha_toolkit.py +46 -22
- camel/types/enums.py +14 -5
- {camel_ai-0.2.58.dist-info → camel_ai-0.2.60.dist-info}/METADATA +7 -9
- {camel_ai-0.2.58.dist-info → camel_ai-0.2.60.dist-info}/RECORD +44 -43
- {camel_ai-0.2.58.dist-info → camel_ai-0.2.60.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.58.dist-info → camel_ai-0.2.60.dist-info}/licenses/LICENSE +0 -0
camel/__init__.py
CHANGED
camel/agents/chat_agent.py
CHANGED
@@ -758,9 +758,17 @@ class ChatAgent(BaseAgent):
         tool_call_records: List[ToolCallingRecord] = []
         external_tool_call_requests: Optional[List[ToolCallRequest]] = None
 
+        accumulated_context_tokens = (
+            0  # This tracks cumulative context tokens, not API usage tokens
+        )
+
+        # Initialize token usage tracker
+        step_token_usage = self._create_token_usage_tracker()
+
         while True:
             try:
                 openai_messages, num_tokens = self.memory.get_context()
+                accumulated_context_tokens += num_tokens
             except RuntimeError as e:
                 return self._step_terminate(
                     e.args[1], tool_call_records, "max_tokens_exceeded"
@@ -768,16 +776,23 @@ class ChatAgent(BaseAgent):
             # Get response from model backend
             response = self._get_model_response(
                 openai_messages,
-                num_tokens,
+                accumulated_context_tokens,  # Cumulative context tokens
                 response_format,
                 self._get_full_tool_schemas(),
             )
 
+            # Accumulate API token usage
+            self._update_token_usage_tracker(
+                step_token_usage, response.usage_dict
+            )
+
             # Terminate Agent if stop_event is set
             if self.stop_event and self.stop_event.is_set():
                 # Use the _step_terminate to terminate the agent with reason
                 return self._step_terminate(
-                    num_tokens, tool_call_records, "termination_triggered"
+                    accumulated_context_tokens,
+                    tool_call_records,
+                    "termination_triggered",
                 )
 
             if tool_call_requests := response.tool_call_requests:
@@ -813,8 +828,11 @@ class ChatAgent(BaseAgent):
         return self._convert_to_chatagent_response(
             response,
             tool_call_records,
-            num_tokens,
+            accumulated_context_tokens,
             external_tool_call_requests,
+            step_token_usage["prompt_tokens"],
+            step_token_usage["completion_tokens"],
+            step_token_usage["total_tokens"],
         )
 
     @property
@@ -857,9 +875,16 @@ class ChatAgent(BaseAgent):
 
         tool_call_records: List[ToolCallingRecord] = []
         external_tool_call_requests: Optional[List[ToolCallRequest]] = None
+        accumulated_context_tokens = (
+            0  # This tracks cumulative context tokens, not API usage tokens
+        )
+
+        # Initialize token usage tracker
+        step_token_usage = self._create_token_usage_tracker()
         while True:
             try:
                 openai_messages, num_tokens = self.memory.get_context()
+                accumulated_context_tokens += num_tokens
             except RuntimeError as e:
                 return self._step_terminate(
                     e.args[1], tool_call_records, "max_tokens_exceeded"
@@ -867,7 +892,7 @@ class ChatAgent(BaseAgent):
 
         response = await self._aget_model_response(
             openai_messages,
-            num_tokens,
+            accumulated_context_tokens,
             response_format,
             self._get_full_tool_schemas(),
         )
@@ -876,7 +901,9 @@ class ChatAgent(BaseAgent):
         if self.stop_event and self.stop_event.is_set():
             # Use the _step_terminate to terminate the agent with reason
             return self._step_terminate(
-                num_tokens, tool_call_records, "termination_triggered"
+                accumulated_context_tokens,
+                tool_call_records,
+                "termination_triggered",
             )
 
         if tool_call_requests := response.tool_call_requests:
@@ -910,28 +937,68 @@ class ChatAgent(BaseAgent):
         await self._aformat_response_if_needed(response, response_format)
         self._record_final_output(response.output_messages)
 
+        # Create token usage tracker for this step
+        step_token_usage = self._create_token_usage_tracker()
+
+        # Update with response usage
+        self._update_token_usage_tracker(step_token_usage, response.usage_dict)
+
         return self._convert_to_chatagent_response(
             response,
             tool_call_records,
-            num_tokens,
+            accumulated_context_tokens,
             external_tool_call_requests,
+            step_token_usage["prompt_tokens"],
+            step_token_usage["completion_tokens"],
+            step_token_usage["total_tokens"],
         )
 
+    def _create_token_usage_tracker(self) -> Dict[str, int]:
+        r"""Creates a fresh token usage tracker for a step.
+
+        Returns:
+            Dict[str, int]: A dictionary for tracking token usage.
+        """
+        return {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}
+
+    def _update_token_usage_tracker(
+        self, tracker: Dict[str, int], usage_dict: Dict[str, int]
+    ) -> None:
+        r"""Updates a token usage tracker with values from a usage dictionary.
+
+        Args:
+            tracker (Dict[str, int]): The token usage tracker to update.
+            usage_dict (Dict[str, int]): The usage dictionary with new values.
+        """
+        tracker["prompt_tokens"] += usage_dict.get("prompt_tokens", 0)
+        tracker["completion_tokens"] += usage_dict.get("completion_tokens", 0)
+        tracker["total_tokens"] += usage_dict.get("total_tokens", 0)
+
     def _convert_to_chatagent_response(
         self,
         response: ModelResponse,
         tool_call_records: List[ToolCallingRecord],
-        num_tokens: int,
+        num_tokens: int,  # Context tokens from the last call in step
         external_tool_call_requests: Optional[List[ToolCallRequest]],
+        step_api_prompt_tokens: int = 0,
+        step_api_completion_tokens: int = 0,
+        step_api_total_tokens: int = 0,
     ) -> ChatAgentResponse:
         r"""Parse the final model response into the chat agent response."""
+        # Create usage_dict for the current step's API calls
+        step_api_usage_dict = {
+            "prompt_tokens": step_api_prompt_tokens,
+            "completion_tokens": step_api_completion_tokens,
+            "total_tokens": step_api_total_tokens,
+        }
+
         info = self._step_get_info(
             response.output_messages,
             response.finish_reasons,
-            response.usage_dict,
+            step_api_usage_dict,  # Pass step-specific API usage here
             response.response_id,
             tool_call_records,
-            num_tokens,
+            num_tokens,  # This is context tokens, not API usage
             external_tool_call_requests,
         )
 
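The two helpers above are plain accumulators over OpenAI-style usage dictionaries, letting one step() that makes several model calls (e.g. tool calls followed by a final answer) report combined API usage. A minimal standalone sketch of that behavior; the function names and token counts below are illustrative stand-ins for _create_token_usage_tracker / _update_token_usage_tracker, not part of the diff:

from typing import Dict

def create_tracker() -> Dict[str, int]:
    # Fresh per-step tracker, mirroring _create_token_usage_tracker().
    return {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}

def update_tracker(tracker: Dict[str, int], usage: Dict[str, int]) -> None:
    # Accumulate one response's usage, mirroring _update_token_usage_tracker().
    for key in ("prompt_tokens", "completion_tokens", "total_tokens"):
        tracker[key] += usage.get(key, 0)

# One step that needed two model calls (a tool call, then a final answer):
step_usage = create_tracker()
update_tracker(step_usage, {"prompt_tokens": 120, "completion_tokens": 30, "total_tokens": 150})
update_tracker(step_usage, {"prompt_tokens": 180, "completion_tokens": 45, "total_tokens": 225})
assert step_usage == {"prompt_tokens": 300, "completion_tokens": 75, "total_tokens": 375}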
@@ -1580,6 +1647,56 @@ class ChatAgent(BaseAgent):
         """
         self.model_backend.add_strategy(name, strategy_fn)
 
+    def clone(self, with_memory: bool = False) -> ChatAgent:
+        r"""Creates a new instance of :obj:`ChatAgent` with the same
+        configuration as the current instance.
+
+        Args:
+            with_memory (bool): Whether to copy the memory (conversation
+                history) to the new agent. If True, the new agent will have
+                the same conversation history. If False, the new agent will
+                have a fresh memory with only the system message.
+                (default: :obj:`False`)
+
+        Returns:
+            ChatAgent: A new instance of :obj:`ChatAgent` with the same
+                configuration.
+        """
+        # Create a new instance with the same configuration
+        # If with_memory is True, set system_message to None
+        # If with_memory is False, use the original system message
+        # To avoid duplicated system memory.
+        system_message = None if with_memory else self._original_system_message
+
+        new_agent = ChatAgent(
+            system_message=system_message,
+            model=self.model_backend.models,  # Pass the existing model_backend
+            memory=None,  # clone memory later
+            message_window_size=getattr(self.memory, "window_size", None),
+            token_limit=getattr(
+                self.memory.get_context_creator(), "token_limit", None
+            ),
+            output_language=self._output_language,
+            tools=list(self._internal_tools.values()),
+            external_tools=[
+                schema for schema in self._external_tool_schemas.values()
+            ],
+            response_terminators=self.response_terminators,
+            scheduling_strategy=self.model_backend.scheduling_strategy.__name__,
+            single_iteration=self.single_iteration,
+            stop_event=self.stop_event,
+        )
+
+        # Copy memory if requested
+        if with_memory:
+            # Get all records from the current memory
+            context_records = self.memory.retrieve()
+            # Write them to the new agent's memory
+            for context_record in context_records:
+                new_agent.memory.write_record(context_record.memory_record)
+
+        return new_agent
+
     def __repr__(self) -> str:
         r"""Returns a string representation of the :obj:`ChatAgent`.
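A hypothetical usage sketch of the new clone() method; it assumes ChatAgent accepts a plain-string system message and user input, as elsewhere in the camel API, and that a model/API key is configured:

from camel.agents import ChatAgent

agent = ChatAgent(system_message="You are a helpful assistant.")
agent.step("My favourite colour is green.")

fresh = agent.clone()                    # same config, memory reset
replica = agent.clone(with_memory=True)  # conversation history carried over

# Only the replica can answer from the earlier turn; the fresh clone
# starts from just the system message.
print(replica.step("What is my favourite colour?").msgs[0].content)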
camel/agents/critic_agent.py
CHANGED
@@ -13,7 +13,7 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import random
 import warnings
-from typing import Any, Dict, Optional, Sequence
+from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
 
 from colorama import Fore
 
@@ -22,6 +22,7 @@ from camel.memories import AgentMemory
 from camel.messages import BaseMessage
 from camel.models import BaseModelBackend
 from camel.responses import ChatAgentResponse
+from camel.types import ModelPlatformType, ModelType
 from camel.utils import get_first_int, print_text_animated
 
 # AgentOps decorator setting
@@ -41,11 +42,18 @@ class CriticAgent(ChatAgent):
     r"""A class for the critic agent that assists in selecting an option.
 
     Args:
-        system_message (BaseMessage): The system message for the critic
-            agent.
-        model (BaseModelBackend,
-
-
+        system_message (Union[BaseMessage, str], optional): The system message
+            for the chat agent. (default: :obj:`None`)
+        model (Union[BaseModelBackend, Tuple[str, str], str, ModelType,
+            Tuple[ModelPlatformType, ModelType], List[BaseModelBackend],
+            List[str], List[ModelType], List[Tuple[str, str]],
+            List[Tuple[ModelPlatformType, ModelType]]], optional):
+            The model backend(s) to use. Can be a single instance,
+            a specification (string, enum, tuple), or a list of instances
+            or specifications to be managed by `ModelManager`. If a list of
+            specifications (not `BaseModelBackend` instances) is provided,
+            they will be instantiated using `ModelFactory`. (default:
+            :obj:`ModelPlatformType.DEFAULT` with `ModelType.DEFAULT`)
         message_window_size (int, optional): The maximum number of previous
             messages to include in the context window. If `None`, no windowing
             is performed. (default: :obj:`6`)
@@ -58,8 +66,21 @@ class CriticAgent(ChatAgent):
 
     def __init__(
         self,
-        system_message: BaseMessage,
-        model: Optional[BaseModelBackend] = None,
+        system_message: Optional[Union[BaseMessage, str]] = None,
+        model: Optional[
+            Union[
+                BaseModelBackend,
+                Tuple[str, str],
+                str,
+                ModelType,
+                Tuple[ModelPlatformType, ModelType],
+                List[BaseModelBackend],
+                List[str],
+                List[ModelType],
+                List[Tuple[str, str]],
+                List[Tuple[ModelPlatformType, ModelType]],
+            ]
+        ] = None,
         memory: Optional[AgentMemory] = None,
         message_window_size: int = 6,
         retry_attempts: int = 2,
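Per the widened annotation and the new docstring, a CriticAgent can now be built from bare model specifications as well as pre-constructed backends. A hedged sketch of the accepted forms; the model choices are illustrative, and actually stepping the agent still requires the relevant API credentials:

from camel.agents import CriticAgent
from camel.types import ModelPlatformType, ModelType

critic = CriticAgent()  # ModelPlatformType.DEFAULT with ModelType.DEFAULT
critic = CriticAgent(model=ModelType.GPT_4O_MINI)  # bare specification
critic = CriticAgent(model=(ModelPlatformType.OPENAI, ModelType.GPT_4O_MINI))
critic = CriticAgent(model=[ModelType.GPT_4O_MINI, ModelType.GPT_4O])  # ModelManager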
@@ -200,3 +221,47 @@ class CriticAgent(ChatAgent):
             terminated=False,
             info={},
         )
+
+    def clone(self, with_memory: bool = False) -> 'CriticAgent':
+        r"""Creates a new instance of :obj:`CriticAgent` with the same
+        configuration as the current instance.
+
+        Args:
+            with_memory (bool): Whether to copy the memory (conversation
+                history) to the new agent. If True, the new agent will have
+                the same conversation history. If False, the new agent will
+                have a fresh memory with only the system message.
+                (default: :obj:`False`)
+
+        Returns:
+            CriticAgent: A new instance of :obj:`CriticAgent` with the same
+                configuration.
+        """
+        # Create a new instance with the same configuration
+        # If with_memory is True, set system_message to None
+        # If with_memory is False, use the original system message
+        # To avoid duplicated system memory.
+        system_message = None if with_memory else self._original_system_message
+
+        new_agent = CriticAgent(
+            system_message=system_message,
+            model=self.model_backend.models,  # Pass the existing model_backend
+            memory=None,  # clone memory later
+            message_window_size=getattr(self.memory, "window_size", 6),
+            retry_attempts=self.retry_attempts,
+            verbose=self.verbose,
+            logger_color=self.logger_color,
+        )
+
+        # Copy memory if requested
+        if with_memory:
+            # Get all records from the current memory
+            context_records = self.memory.retrieve()
+            # Write them to the new agent's memory
+            for context_record in context_records:
+                new_agent.memory.write_record(context_record.memory_record)
+
+        # Copy CriticAgent-specific attributes
+        new_agent.options_dict = self.options_dict.copy()
+
+        return new_agent
camel/benchmarks/__init__.py
CHANGED
@@ -15,12 +15,14 @@
 from .apibank import APIBankBenchmark
 from .apibench import APIBenchBenchmark
 from .base import BaseBenchmark
+from .browsecomp import BrowseCompBenchmark
 from .gaia import DefaultGAIARetriever, GAIABenchmark
 from .nexus import NexusBenchmark
 from .ragbench import RAGBenchBenchmark
 
 __all__ = [
     "BaseBenchmark",
+    "BrowseCompBenchmark",
     "GAIABenchmark",
     "DefaultGAIARetriever",
     "NexusBenchmark",