camel-ai 0.2.19__py3-none-any.whl → 0.2.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai might be problematic.

Files changed (40)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +29 -30
  3. camel/agents/knowledge_graph_agent.py +1 -5
  4. camel/benchmarks/apibench.py +1 -5
  5. camel/benchmarks/nexus.py +1 -5
  6. camel/benchmarks/ragbench.py +2 -2
  7. camel/bots/telegram_bot.py +1 -5
  8. camel/configs/__init__.py +9 -0
  9. camel/configs/aiml_config.py +80 -0
  10. camel/configs/moonshot_config.py +63 -0
  11. camel/configs/siliconflow_config.py +91 -0
  12. camel/datagen/__init__.py +3 -1
  13. camel/datagen/self_improving_cot.py +821 -0
  14. camel/datahubs/huggingface.py +3 -3
  15. camel/embeddings/jina_embedding.py +6 -1
  16. camel/models/__init__.py +4 -0
  17. camel/models/aiml_model.py +147 -0
  18. camel/models/model_factory.py +9 -0
  19. camel/models/moonshot_model.py +138 -0
  20. camel/models/siliconflow_model.py +142 -0
  21. camel/societies/workforce/role_playing_worker.py +2 -4
  22. camel/societies/workforce/single_agent_worker.py +1 -6
  23. camel/societies/workforce/workforce.py +3 -9
  24. camel/toolkits/__init__.py +4 -0
  25. camel/toolkits/reddit_toolkit.py +8 -38
  26. camel/toolkits/search_toolkit.py +12 -0
  27. camel/toolkits/semantic_scholar_toolkit.py +308 -0
  28. camel/toolkits/sympy_toolkit.py +778 -0
  29. camel/toolkits/whatsapp_toolkit.py +11 -32
  30. camel/types/enums.py +137 -6
  31. camel/types/unified_model_type.py +5 -0
  32. camel/utils/__init__.py +7 -2
  33. camel/utils/commons.py +198 -21
  34. camel/utils/deduplication.py +199 -0
  35. camel/utils/token_counting.py +0 -38
  36. {camel_ai-0.2.19.dist-info → camel_ai-0.2.20.dist-info}/METADATA +13 -11
  37. {camel_ai-0.2.19.dist-info → camel_ai-0.2.20.dist-info}/RECORD +40 -30
  38. /camel/datagen/{cotdatagen.py → cot_datagen.py} +0 -0
  39. {camel_ai-0.2.19.dist-info → camel_ai-0.2.20.dist-info}/LICENSE +0 -0
  40. {camel_ai-0.2.19.dist-info → camel_ai-0.2.20.dist-info}/WHEEL +0 -0
camel/datahubs/huggingface.py CHANGED
@@ -32,19 +32,19 @@ class HuggingFaceDatasetManager(BaseDatasetManager):
 
     Args:
         token (str): The Hugging Face API token. If not provided, the token
-            will be read from the environment variable `HUGGING_FACE_TOKEN`.
+            will be read from the environment variable `HF_TOKEN`.
     """
 
     @api_keys_required(
         [
-            ("token", "HUGGING_FACE_TOKEN"),
+            ("token", "HF_TOKEN"),
         ]
     )
     @dependencies_required('huggingface_hub')
     def __init__(self, token: Optional[str] = None):
         from huggingface_hub import HfApi
 
-        self._api_key = token or os.getenv("HUGGING_FACE_TOKEN")
+        self._api_key = token or os.getenv("HF_TOKEN")
         self.api = HfApi(token=self._api_key)
 
     def create_dataset_card(
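
For callers, the practical effect is the renamed environment variable. A minimal usage sketch, assuming only the constructor shown above:

import os

from camel.datahubs.huggingface import HuggingFaceDatasetManager

# New in 0.2.20: the token is read from HF_TOKEN, not HUGGING_FACE_TOKEN.
os.environ["HF_TOKEN"] = "hf_..."  # placeholder token
manager = HuggingFaceDatasetManager()

# An explicit token still takes precedence over the environment variable.
manager = HuggingFaceDatasetManager(token="hf_...")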
camel/embeddings/jina_embedding.py CHANGED
@@ -35,6 +35,11 @@ class JinaEmbedding(BaseEmbedding[Union[str, Image.Image]]):
             Jina AI. (default: :obj:`None`)
         dimensions (Optional[int], optional): The dimension of the output
             embeddings. (default: :obj:`None`)
+        embedding_type (Optional[str], optional): The type of embedding format
+            to generate. Options: 'int8' (binary encoding with higher storage
+            and transfer efficiency), 'uint8' (unsigned binary encoding with
+            higher storage and transfer efficiency), 'base64' (base64 string
+            encoding with higher transfer efficiency). (default: :obj:`None`)
         task (Optional[str], optional): The type of task for text embeddings.
             Options: retrieval.query, retrieval.passage, text-matching,
             classification, separation. (default: :obj:`None`)
@@ -120,7 +125,7 @@ class JinaEmbedding(BaseEmbedding[Union[str, Image.Image]]):
         else:
             raise ValueError(
                 f"Input type {type(obj)} is not supported. "
-                "Must be either str or PIL.Image"
+                "Must be either str or PIL.Image."
             )
 
         data = {
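
The new `embedding_type` option selects the wire format of the returned vectors. A short sketch, assuming `JinaEmbedding` accepts the parameter as a constructor keyword and that `embed_list` follows the usual `BaseEmbedding` interface:

from camel.embeddings import JinaEmbedding

# 'base64' keeps full precision but compresses transfer; 'int8'/'uint8'
# trade precision for smaller storage, per the docstring above.
embedding = JinaEmbedding(embedding_type="base64")
vectors = embedding.embed_list(["CAMEL is a multi-agent framework."])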
camel/models/__init__.py CHANGED
@@ -11,6 +11,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from .aiml_model import AIMLModel
 from .anthropic_model import AnthropicModel
 from .azure_openai_model import AzureOpenAIModel
 from .base_model import BaseModelBackend
@@ -24,6 +25,7 @@ from .litellm_model import LiteLLMModel
 from .mistral_model import MistralModel
 from .model_factory import ModelFactory
 from .model_manager import ModelManager, ModelProcessingError
+from .moonshot_model import MoonshotModel
 from .nemotron_model import NemotronModel
 from .nvidia_model import NvidiaModel
 from .ollama_model import OllamaModel
@@ -70,4 +72,6 @@ __all__ = [
     'DeepSeekModel',
     'FishAudioModel',
     'InternLMModel',
+    'MoonshotModel',
+    'AIMLModel',
 ]
camel/models/aiml_model.py ADDED
@@ -0,0 +1,147 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import os
+from typing import Any, Dict, List, Optional, Union
+
+from openai import OpenAI, Stream
+
+from camel.configs import AIML_API_PARAMS, AIMLConfig
+from camel.messages import OpenAIMessage
+from camel.models.base_model import BaseModelBackend
+from camel.types import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    ModelType,
+)
+from camel.utils import (
+    BaseTokenCounter,
+    OpenAITokenCounter,
+    api_keys_required,
+)
+
+
+class AIMLModel(BaseModelBackend):
+    r"""AIML API in a unified BaseModelBackend interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into OpenAI client. If :obj:`None`,
+            :obj:`AIMLConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            the AIML service. (default: :obj:`None`)
+        url (Optional[str], optional): The URL to the AIML service. If
+            not provided, :obj:`https://api.aimlapi.com/v1` will be used.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+            (default: :obj:`None`)
+    """
+
+    @api_keys_required(
+        [
+            ("api_key", 'AIML_API_KEY'),
+        ]
+    )
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+    ) -> None:
+        if model_config_dict is None:
+            model_config_dict = AIMLConfig().as_dict()
+        api_key = api_key or os.environ.get("AIML_API_KEY")
+        url = url or os.environ.get(
+            "AIML_API_BASE_URL",
+            "https://api.aimlapi.com/v1",
+        )
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter
+        )
+        self._client = OpenAI(
+            timeout=180,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )
+
+    def run(
+        self,
+        messages: List[OpenAIMessage],
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        r"""Runs inference of OpenAI chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `Stream[ChatCompletionChunk]` in the stream mode.
+        """
+        # Process model configuration parameters
+        model_config = self.model_config_dict.copy()
+
+        # Handle special case for tools parameter
+        if model_config.get('tools') is None:
+            model_config['tools'] = []
+
+        response = self._client.chat.completions.create(
+            messages=messages, model=self.model_type, **model_config
+        )
+        return response
+
+    @property
+    def token_counter(self) -> BaseTokenCounter:
+        r"""Initialize the token counter for the model backend.
+
+        Returns:
+            BaseTokenCounter: The token counter following the model's
+                tokenization style.
+        """
+        if not self._token_counter:
+            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
+        return self._token_counter
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any
+        unexpected arguments to AIML API.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to AIML API.
+        """
+        for param in self.model_config_dict:
+            if param not in AIML_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into AIML model backend."
+                )
+
+    @property
+    def stream(self) -> bool:
+        """Returns whether the model is in stream mode, which sends partial
+        results each time.
+
+        Returns:
+            bool: Whether the model is in stream mode.
+        """
+        return self.model_config_dict.get('stream', False)
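
A minimal direct-use sketch for the new backend, assuming `AIML_API_KEY` is set in the environment; the model name below is illustrative, not taken from this diff:

from camel.models import AIMLModel

model = AIMLModel(model_type="gpt-4o-mini")  # illustrative model name
response = model.run(
    [{"role": "user", "content": "Say hello in one word."}]
)
print(response.choices[0].message.content)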
camel/models/model_factory.py CHANGED
@@ -13,6 +13,7 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from typing import Dict, Optional, Type, Union
 
+from camel.models.aiml_model import AIMLModel
 from camel.models.anthropic_model import AnthropicModel
 from camel.models.azure_openai_model import AzureOpenAIModel
 from camel.models.base_model import BaseModelBackend
@@ -23,6 +24,7 @@ from camel.models.groq_model import GroqModel
 from camel.models.internlm_model import InternLMModel
 from camel.models.litellm_model import LiteLLMModel
 from camel.models.mistral_model import MistralModel
+from camel.models.moonshot_model import MoonshotModel
 from camel.models.nvidia_model import NvidiaModel
 from camel.models.ollama_model import OllamaModel
 from camel.models.openai_compatible_model import OpenAICompatibleModel
@@ -31,6 +33,7 @@ from camel.models.qwen_model import QwenModel
 from camel.models.reka_model import RekaModel
 from camel.models.samba_model import SambaModel
 from camel.models.sglang_model import SGLangModel
+from camel.models.siliconflow_model import SiliconFlowModel
 from camel.models.stub_model import StubModel
 from camel.models.togetherai_model import TogetherAIModel
 from camel.models.vllm_model import VLLMModel
@@ -100,6 +103,10 @@ class ModelFactory:
             model_class = LiteLLMModel
         elif model_platform.is_nvidia:
             model_class = NvidiaModel
+        elif model_platform.is_siliconflow:
+            model_class = SiliconFlowModel
+        elif model_platform.is_aiml:
+            model_class = AIMLModel
 
         elif model_platform.is_openai and model_type.is_openai:
             model_class = OpenAIModel
@@ -127,6 +134,8 @@ class ModelFactory:
             model_class = DeepSeekModel
         elif model_platform.is_internlm and model_type.is_internlm:
             model_class = InternLMModel
+        elif model_platform.is_moonshot and model_type.is_moonshot:
+            model_class = MoonshotModel
         elif model_type == ModelType.STUB:
             model_class = StubModel
 
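These branches make the new platforms reachable through the factory. A hedged sketch, assuming `ModelPlatformType` gained the corresponding members as part of the `camel/types/enums.py` changes listed above:

from camel.models import ModelFactory
from camel.types import ModelPlatformType

# Platform member and model name are assumptions for illustration.
model = ModelFactory.create(
    model_platform=ModelPlatformType.MOONSHOT,
    model_type="moonshot-v1-8k",
)
response = model.run([{"role": "user", "content": "Hello!"}])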
camel/models/moonshot_model.py ADDED
@@ -0,0 +1,138 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import os
+from typing import Any, Dict, List, Optional, Union
+
+from openai import OpenAI, Stream
+
+from camel.configs import MOONSHOT_API_PARAMS, MoonshotConfig
+from camel.messages import OpenAIMessage
+from camel.models import BaseModelBackend
+from camel.types import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    ModelType,
+)
+from camel.utils import (
+    BaseTokenCounter,
+    OpenAITokenCounter,
+    api_keys_required,
+)
+
+
+class MoonshotModel(BaseModelBackend):
+    r"""Moonshot API in a unified BaseModelBackend interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created, one of Moonshot series.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into :obj:`openai.ChatCompletion.create()`. If
+            :obj:`None`, :obj:`MoonshotConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            the Moonshot service. (default: :obj:`None`)
+        url (Optional[str], optional): The url to the Moonshot service.
+            (default: :obj:`https://api.moonshot.cn/v1`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4)` will be used.
+            (default: :obj:`None`)
+    """
+
+    @api_keys_required([("api_key", "MOONSHOT_API_KEY")])
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+    ) -> None:
+        if model_config_dict is None:
+            model_config_dict = MoonshotConfig().as_dict()
+        api_key = api_key or os.environ.get("MOONSHOT_API_KEY")
+        url = url or os.environ.get(
+            "MOONSHOT_API_BASE_URL",
+            "https://api.moonshot.cn/v1",
+        )
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter
+        )
+        self._client = OpenAI(
+            api_key=self._api_key,
+            timeout=180,
+            max_retries=3,
+            base_url=self._url,
+        )
+
+    def run(
+        self,
+        messages: List[OpenAIMessage],
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        r"""Runs inference of Moonshot chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `Stream[ChatCompletionChunk]` in the stream mode.
+        """
+        response = self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **self.model_config_dict,
+        )
+        return response
+
+    @property
+    def token_counter(self) -> BaseTokenCounter:
+        r"""Initialize the token counter for the model backend.
+
+        Returns:
+            OpenAITokenCounter: The token counter following the model's
+                tokenization style.
+        """
+        if not self._token_counter:
+            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
+        return self._token_counter
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any
+        unexpected arguments to Moonshot API.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to Moonshot API.
+        """
+        for param in self.model_config_dict:
+            if param not in MOONSHOT_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into Moonshot model backend."
+                )
+
+    @property
+    def stream(self) -> bool:
+        r"""Returns whether the model is in stream mode, which sends partial
+        results each time.
+
+        Returns:
+            bool: Whether the model is in stream mode.
+        """
+        return self.model_config_dict.get('stream', False)
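
A direct-instantiation sketch, assuming `MOONSHOT_API_KEY` is set and that `MoonshotConfig` exposes common sampling fields such as `temperature` (an assumption; the config file itself is not shown in this diff). Note that `check_model_config` rejects any key outside `MOONSHOT_API_PARAMS`:

from camel.configs import MoonshotConfig
from camel.models import MoonshotModel

model = MoonshotModel(
    model_type="moonshot-v1-8k",  # illustrative Moonshot model name
    model_config_dict=MoonshotConfig(temperature=0.3).as_dict(),
)
model.check_model_config()  # raises ValueError on unexpected parameters
response = model.run([{"role": "user", "content": "Hello!"}])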
camel/models/siliconflow_model.py ADDED
@@ -0,0 +1,142 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import os
+from typing import Any, Dict, List, Optional, Union
+
+from openai import OpenAI, Stream
+
+from camel.configs import SILICONFLOW_API_PARAMS, SiliconFlowConfig
+from camel.messages import OpenAIMessage
+from camel.models.base_model import BaseModelBackend
+from camel.types import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    ModelType,
+)
+from camel.utils import (
+    BaseTokenCounter,
+    OpenAITokenCounter,
+    api_keys_required,
+)
+
+
+class SiliconFlowModel(BaseModelBackend):
+    r"""SiliconFlow API in a unified BaseModelBackend interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into OpenAI client. If :obj:`None`,
+            :obj:`SiliconFlowConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            the SiliconFlow service. (default: :obj:`None`)
+        url (Optional[str], optional): The URL to the SiliconFlow service. If
+            not provided, :obj:`https://api.siliconflow.cn/v1/` will be used.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+            (default: :obj:`None`)
+    """
+
+    @api_keys_required(
+        [
+            ("api_key", 'SILICONFLOW_API_KEY'),
+        ]
+    )
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+    ) -> None:
+        if model_config_dict is None:
+            model_config_dict = SiliconFlowConfig().as_dict()
+        api_key = api_key or os.environ.get("SILICONFLOW_API_KEY")
+        url = url or os.environ.get(
+            "SILICONFLOW_API_BASE_URL",
+            "https://api.siliconflow.cn/v1/",
+        )
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter
+        )
+        self._client = OpenAI(
+            timeout=180,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )
+
+    def run(
+        self,
+        messages: List[OpenAIMessage],
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        r"""Runs inference of SiliconFlow chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `Stream[ChatCompletionChunk]` in the stream mode.
+        """
+        response = self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **self.model_config_dict,
+        )
+        return response
+
+    @property
+    def token_counter(self) -> BaseTokenCounter:
+        r"""Initialize the token counter for the model backend.
+
+        Returns:
+            BaseTokenCounter: The token counter following the model's
+                tokenization style.
+        """
+        if not self._token_counter:
+            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
+        return self._token_counter
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any
+        unexpected arguments to SiliconFlow API.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to SiliconFlow API.
+        """
+        for param in self.model_config_dict:
+            if param not in SILICONFLOW_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into SiliconFlow model backend."
+                )
+
+    @property
+    def stream(self) -> bool:
+        """Returns whether the model is in stream mode, which sends partial
+        results each time.
+
+        Returns:
+            bool: Whether the model is in stream mode.
+        """
+        return self.model_config_dict.get('stream', False)
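
Since `stream` is read straight from the config dict, streaming is toggled per instance. A sketch under the same assumptions as above (illustrative model name, and assuming `stream` is an accepted key in `SILICONFLOW_API_PARAMS`):

from camel.models import SiliconFlowModel

model = SiliconFlowModel(
    model_type="deepseek-ai/DeepSeek-V2.5",  # illustrative model name
    model_config_dict={"stream": True},
)
assert model.stream
# In stream mode, run() yields ChatCompletionChunk objects.
for chunk in model.run([{"role": "user", "content": "Hi"}]):
    print(chunk.choices[0].delta.content or "", end="")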
camel/societies/workforce/role_playing_worker.py CHANGED
@@ -168,11 +168,9 @@ class RolePlayingWorker(Worker):
             chat_history=chat_history_str,
             additional_info=task.additional_info,
         )
-        req = BaseMessage.make_user_message(
-            role_name="User",
-            content=prompt,
+        response = self.summarize_agent.step(
+            prompt, response_format=TaskResult
         )
-        response = self.summarize_agent.step(req, response_format=TaskResult)
         result_dict = json.loads(response.msg.content)
         task_result = TaskResult(**result_dict)
         task.result = task_result.content
camel/societies/workforce/single_agent_worker.py CHANGED
@@ -19,7 +19,6 @@ from typing import Any, List
 from colorama import Fore
 
 from camel.agents import ChatAgent
-from camel.messages.base import BaseMessage
 from camel.societies.workforce.prompts import PROCESS_TASK_PROMPT
 from camel.societies.workforce.utils import TaskResult
 from camel.societies.workforce.worker import Worker
@@ -72,12 +71,8 @@ class SingleAgentWorker(Worker):
             dependency_tasks_info=dependency_tasks_info,
             additional_info=task.additional_info,
         )
-        req = BaseMessage.make_user_message(
-            role_name="User",
-            content=prompt,
-        )
         try:
-            response = self.worker.step(req, response_format=TaskResult)
+            response = self.worker.step(prompt, response_format=TaskResult)
         except Exception as e:
             print(
                 f"{Fore.RED}Error occurred while processing task {task.id}:"
camel/societies/workforce/workforce.py CHANGED
@@ -281,13 +281,9 @@ class Workforce(BaseNode):
             child_nodes_info=self._get_child_nodes_info(),
             additional_info=task.additional_info,
         )
-        req = BaseMessage.make_user_message(
-            role_name="User",
-            content=prompt,
-        )
 
         response = self.coordinator_agent.step(
-            req, response_format=TaskAssignResult
+            prompt, response_format=TaskAssignResult
         )
         result_dict = json.loads(response.msg.content)
         task_assign_result = TaskAssignResult(**result_dict)
@@ -315,11 +311,9 @@ class Workforce(BaseNode):
             child_nodes_info=self._get_child_nodes_info(),
             additional_info=task.additional_info,
         )
-        req = BaseMessage.make_user_message(
-            role_name="User",
-            content=prompt,
+        response = self.coordinator_agent.step(
+            prompt, response_format=WorkerConf
         )
-        response = self.coordinator_agent.step(req, response_format=WorkerConf)
         result_dict = json.loads(response.msg.content)
         new_node_conf = WorkerConf(**result_dict)
 
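All three workforce changes follow one pattern: `ChatAgent.step()` in 0.2.20 accepts a plain string, so the `BaseMessage.make_user_message` wrapper becomes unnecessary. A before/after sketch, assuming a `ChatAgent` constructed from a string system message:

from camel.agents import ChatAgent
from camel.messages import BaseMessage

agent = ChatAgent("You are a helpful assistant.")

# 0.2.19: wrap the prompt in a BaseMessage first.
req = BaseMessage.make_user_message(role_name="User", content="Summarize X.")
response = agent.step(req)

# 0.2.20: pass the string directly.
response = agent.step("Summarize X.")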
camel/toolkits/__init__.py CHANGED
@@ -45,6 +45,8 @@ from .human_toolkit import HumanToolkit
 from .stripe_toolkit import StripeToolkit
 from .video_toolkit import VideoDownloaderToolkit
 from .dappier_toolkit import DappierToolkit
+from .sympy_toolkit import SymPyToolkit
+from .semantic_scholar_toolkit import SemanticScholarToolkit
 
 __all__ = [
     'BaseToolkit',
@@ -77,4 +79,6 @@ __all__ = [
     'MeshyToolkit',
     'OpenBBToolkit',
     'DappierToolkit',
+    'SymPyToolkit',
+    'SemanticScholarToolkit',
 ]
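
Both new toolkits are exported at package level and should follow the standard `BaseToolkit` interface. A minimal sketch, assuming the usual `get_tools()` method and a `ChatAgent` that accepts a `tools` argument:

from camel.agents import ChatAgent
from camel.toolkits import SymPyToolkit

agent = ChatAgent(
    "You are a math assistant.",
    tools=SymPyToolkit().get_tools(),  # SymPy operations exposed as tools
)
response = agent.step("Simplify (x**2 - 1)/(x - 1).")
print(response.msg.content)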