camel-ai 0.2.18__py3-none-any.whl → 0.2.20a0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Files changed (34)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +27 -27
  3. camel/agents/multi_hop_generator_agent.py +35 -3
  4. camel/agents/programmed_agent_instruction.py +73 -18
  5. camel/configs/__init__.py +6 -0
  6. camel/configs/gemini_config.py +1 -1
  7. camel/configs/moonshot_config.py +63 -0
  8. camel/configs/sglang_config.py +4 -0
  9. camel/configs/siliconflow_config.py +91 -0
  10. camel/datagen/source2synth/__init__.py +31 -0
  11. camel/{synthetic_datagen → datagen}/source2synth/data_processor.py +194 -29
  12. camel/{synthetic_datagen → datagen}/source2synth/models.py +25 -0
  13. camel/{synthetic_datagen → datagen}/source2synth/user_data_processor_config.py +9 -8
  14. camel/datahubs/huggingface.py +3 -3
  15. camel/embeddings/__init__.py +2 -0
  16. camel/embeddings/jina_embedding.py +161 -0
  17. camel/messages/func_message.py +1 -1
  18. camel/models/__init__.py +2 -0
  19. camel/models/deepseek_model.py +29 -11
  20. camel/models/groq_model.py +0 -2
  21. camel/models/model_factory.py +6 -0
  22. camel/models/moonshot_model.py +138 -0
  23. camel/models/openai_model.py +1 -9
  24. camel/models/siliconflow_model.py +142 -0
  25. camel/toolkits/__init__.py +2 -0
  26. camel/toolkits/search_toolkit.py +17 -6
  27. camel/toolkits/semantic_scholar_toolkit.py +308 -0
  28. camel/types/enums.py +176 -15
  29. camel/types/unified_model_type.py +5 -0
  30. camel/utils/token_counting.py +1 -1
  31. {camel_ai-0.2.18.dist-info → camel_ai-0.2.20a0.dist-info}/METADATA +9 -3
  32. {camel_ai-0.2.18.dist-info → camel_ai-0.2.20a0.dist-info}/RECORD +34 -27
  33. {camel_ai-0.2.18.dist-info → camel_ai-0.2.20a0.dist-info}/LICENSE +0 -0
  34. {camel_ai-0.2.18.dist-info → camel_ai-0.2.20a0.dist-info}/WHEEL +0 -0
camel/models/deepseek_model.py

@@ -13,12 +13,12 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 import os
-import warnings
 from typing import Any, Dict, List, Optional, Union
 
 from openai import OpenAI, Stream
 
 from camel.configs import DEEPSEEK_API_PARAMS, DeepSeekConfig
+from camel.logger import get_logger
 from camel.messages import OpenAIMessage
 from camel.models.base_model import BaseModelBackend
 from camel.types import (
@@ -28,6 +28,8 @@ from camel.types import (
 )
 from camel.utils import BaseTokenCounter, OpenAITokenCounter, api_keys_required
 
+logger = get_logger(__name__)
+
 
 class DeepSeekModel(BaseModelBackend):
     r"""DeepSeek API in a unified BaseModelBackend interface.
@@ -116,11 +118,12 @@ class DeepSeekModel(BaseModelBackend):
         if self.model_type in [
             ModelType.DEEPSEEK_REASONER,
         ]:
-            warnings.warn(
-                "Warning: You are using an DeepSeek Reasoner model, "
+            import re
+
+            logger.warning(
+                "You are using a DeepSeek Reasoner model, "
                 "which has certain limitations, reference: "
-                "`https://api-docs.deepseek.com/guides/reasoning_model#api-parameters`.",
-                UserWarning,
+                "`https://api-docs.deepseek.com/guides/reasoning_model#api-parameters`"
             )
 
             # Check and remove unsupported parameters and reset the fixed
@@ -138,14 +141,29 @@ class DeepSeekModel(BaseModelBackend):
                 if key in self.model_config_dict:
                     del self.model_config_dict[key]
 
+            # Remove thinking content from messages before sending to API
+            # This ensures only the final response is sent, excluding
+            # intermediate thought processes
+            messages = [
+                {  # type: ignore[misc]
+                    **msg,
+                    'content': re.sub(
+                        r'<think>.*?</think>',
+                        '',
+                        msg['content'],  # type: ignore[arg-type]
+                        flags=re.DOTALL,
+                    ).strip(),
+                }
+                for msg in messages
+            ]
+
         response = self._client.chat.completions.create(
             messages=messages,
             model=self.model_type,
             **self.model_config_dict,
         )
 
-        # Temporary solution to handle the case where
-        # deepseek returns a reasoning_content
+        # Handle reasoning content with <think> tags at the beginning
         if (
             self.model_type
             in [
@@ -156,10 +174,10 @@ class DeepSeekModel(BaseModelBackend):
         ):
             reasoning_content = response.choices[0].message.reasoning_content
             combined_content = (
-                response.choices[0].message.content
-                + "\n\nBELOW IS THE REASONING CONTENT:\n\n"
-                + (reasoning_content if reasoning_content else "")
-            )
+                f"<think>\n{reasoning_content}\n</think>\n"
+                if reasoning_content
+                else ""
+            ) + response.choices[0].message.content
 
         response = ChatCompletion.construct(
             id=response.id,
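Taken together, the two changes above round-trip reasoning as <think> blocks: the response path prepends the reasoning inside <think>...</think>, and the request path strips those blocks back out of the chat history. A minimal standalone sketch of that behavior (function names are ours, not from the package):

import re

def strip_think(content: str) -> str:
    # Request path: drop any <think>...</think> span before resending history.
    return re.sub(r'<think>.*?</think>', '', content, flags=re.DOTALL).strip()

def wrap_think(reasoning: str, answer: str) -> str:
    # Response path: prepend reasoning as a <think> block, as combined_content does.
    return (f"<think>\n{reasoning}\n</think>\n" if reasoning else "") + answer

combined = wrap_think("step-by-step reasoning...", "final answer")
assert strip_think(combined) == "final answer"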
camel/models/groq_model.py

@@ -88,8 +88,6 @@ class GroqModel(BaseModelBackend):
             BaseTokenCounter: The token counter following the model's
                 tokenization style.
         """
-        # Make sure you have the access to these open-source model in
-        # HuggingFace
         if not self._token_counter:
             self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
         return self._token_counter
camel/models/model_factory.py

@@ -23,6 +23,7 @@ from camel.models.groq_model import GroqModel
 from camel.models.internlm_model import InternLMModel
 from camel.models.litellm_model import LiteLLMModel
 from camel.models.mistral_model import MistralModel
+from camel.models.moonshot_model import MoonshotModel
 from camel.models.nvidia_model import NvidiaModel
 from camel.models.ollama_model import OllamaModel
 from camel.models.openai_compatible_model import OpenAICompatibleModel
@@ -31,6 +32,7 @@ from camel.models.qwen_model import QwenModel
 from camel.models.reka_model import RekaModel
 from camel.models.samba_model import SambaModel
 from camel.models.sglang_model import SGLangModel
+from camel.models.siliconflow_model import SiliconFlowModel
 from camel.models.stub_model import StubModel
 from camel.models.togetherai_model import TogetherAIModel
 from camel.models.vllm_model import VLLMModel
@@ -100,6 +102,8 @@ class ModelFactory:
             model_class = LiteLLMModel
         elif model_platform.is_nvidia:
             model_class = NvidiaModel
+        elif model_platform.is_siliconflow:
+            model_class = SiliconFlowModel
 
         elif model_platform.is_openai and model_type.is_openai:
             model_class = OpenAIModel
@@ -127,6 +131,8 @@ class ModelFactory:
             model_class = DeepSeekModel
         elif model_platform.is_internlm and model_type.is_internlm:
             model_class = InternLMModel
+        elif model_platform.is_moonshot and model_type.is_moonshot:
+            model_class = MoonshotModel
         elif model_type == ModelType.STUB:
             model_class = StubModel
 
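For reference, a hedged sketch of how the new factory branches are reached (the exact ModelPlatformType and ModelType member names for Moonshot are assumptions based on the enums.py changes listed above, not shown in this diff):

from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

# Dispatches to MoonshotModel through the new is_moonshot branch;
# expects MOONSHOT_API_KEY in the environment.
model = ModelFactory.create(
    model_platform=ModelPlatformType.MOONSHOT,  # assumed enum member
    model_type=ModelType.MOONSHOT_V1_8K,        # assumed enum member
)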
camel/models/moonshot_model.py (new file)

@@ -0,0 +1,138 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import os
+from typing import Any, Dict, List, Optional, Union
+
+from openai import OpenAI, Stream
+
+from camel.configs import MOONSHOT_API_PARAMS, MoonshotConfig
+from camel.messages import OpenAIMessage
+from camel.models import BaseModelBackend
+from camel.types import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    ModelType,
+)
+from camel.utils import (
+    BaseTokenCounter,
+    OpenAITokenCounter,
+    api_keys_required,
+)
+
+
+class MoonshotModel(BaseModelBackend):
+    r"""Moonshot API in a unified BaseModelBackend interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created, one of Moonshot series.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into :obj:`openai.ChatCompletion.create()`. If
+            :obj:`None`, :obj:`MoonshotConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            the Moonshot service. (default: :obj:`None`)
+        url (Optional[str], optional): The url to the Moonshot service.
+            (default: :obj:`https://api.moonshot.cn/v1`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4)` will be used.
+            (default: :obj:`None`)
+    """
+
+    @api_keys_required([("api_key", "MOONSHOT_API_KEY")])
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+    ) -> None:
+        if model_config_dict is None:
+            model_config_dict = MoonshotConfig().as_dict()
+        api_key = api_key or os.environ.get("MOONSHOT_API_KEY")
+        url = url or os.environ.get(
+            "MOONSHOT_API_BASE_URL",
+            "https://api.moonshot.cn/v1",
+        )
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter
+        )
+        self._client = OpenAI(
+            api_key=self._api_key,
+            timeout=180,
+            max_retries=3,
+            base_url=self._url,
+        )
+
+    def run(
+        self,
+        messages: List[OpenAIMessage],
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        r"""Runs inference of Moonshot chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `Stream[ChatCompletionChunk]` in the stream mode.
+        """
+        response = self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **self.model_config_dict,
+        )
+        return response
+
+    @property
+    def token_counter(self) -> BaseTokenCounter:
+        r"""Initialize the token counter for the model backend.
+
+        Returns:
+            OpenAITokenCounter: The token counter following the model's
+                tokenization style.
+        """
+        if not self._token_counter:
+            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
+        return self._token_counter
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any
+        unexpected arguments to Moonshot API.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to Moonshot API.
+        """
+        for param in self.model_config_dict:
+            if param not in MOONSHOT_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into Moonshot model backend."
+                )
+
+    @property
+    def stream(self) -> bool:
+        r"""Returns whether the model is in stream mode, which sends partial
+        results each time.
+
+        Returns:
+            bool: Whether the model is in stream mode.
+        """
+        return self.model_config_dict.get('stream', False)
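A short, hedged usage sketch for the new backend (the model id string and the message are illustrative; run takes OpenAI-format message dicts):

import os
from camel.models.moonshot_model import MoonshotModel

os.environ.setdefault("MOONSHOT_API_KEY", "sk-...")  # placeholder key

model = MoonshotModel(model_type="moonshot-v1-8k")  # assumed model id
response = model.run(
    messages=[{"role": "user", "content": "Hello, Moonshot!"}]
)
print(response.choices[0].message.content)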
camel/models/openai_model.py

@@ -21,7 +21,6 @@ from camel.configs import OPENAI_API_PARAMS, ChatGPTConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
 from camel.types import (
-    NOT_GIVEN,
     ChatCompletion,
     ChatCompletionChunk,
     ModelType,
@@ -112,6 +111,7 @@ class OpenAIModel(BaseModelBackend):
             ModelType.O1,
             ModelType.O1_MINI,
             ModelType.O1_PREVIEW,
+            ModelType.O3_MINI,
         ]:
             warnings.warn(
                 "Warning: You are using an O1 model (O1_MINI or O1_PREVIEW), "
@@ -148,14 +148,6 @@ class OpenAIModel(BaseModelBackend):
 
             return self._to_chat_completion(response)
 
-        # Removing 'strict': True from the dictionary for
-        # client.chat.completions.create
-        if self.model_config_dict.get('tools') is not NOT_GIVEN:
-            for tool in self.model_config_dict.get('tools', []):
-                function_dict = tool.get('function', {})
-                if 'strict' in function_dict:
-                    del function_dict['strict']
-
         response = self._client.chat.completions.create(
             messages=messages,
             model=self.model_type,
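One consequence of the deleted block: a `strict` flag set on a tool's function schema is no longer stripped and now reaches the API unchanged. An illustrative tool entry (the tool name and schema are hypothetical, not from this package):

tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",  # hypothetical tool
            "parameters": {"type": "object", "properties": {}},
            "strict": True,  # previously deleted before the API call; now passed through
        },
    }
]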
camel/models/siliconflow_model.py (new file)

@@ -0,0 +1,142 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import os
+from typing import Any, Dict, List, Optional, Union
+
+from openai import OpenAI, Stream
+
+from camel.configs import SILICONFLOW_API_PARAMS, SiliconFlowConfig
+from camel.messages import OpenAIMessage
+from camel.models import BaseModelBackend
+from camel.types import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    ModelType,
+)
+from camel.utils import (
+    BaseTokenCounter,
+    OpenAITokenCounter,
+    api_keys_required,
+)
+
+
+class SiliconFlowModel(BaseModelBackend):
+    r"""SiliconFlow API in a unified BaseModelBackend interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into OpenAI client. If :obj:`None`,
+            :obj:`SiliconFlowConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            the SiliconFlow service. (default: :obj:`None`)
+        url (Optional[str], optional): The URL to the SiliconFlow service. If
+            not provided, :obj:`https://api.siliconflow.cn/v1/` will be used.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+            (default: :obj:`None`)
+    """
+
+    @api_keys_required(
+        [
+            ("api_key", 'SILICONFLOW_API_KEY'),
+        ]
+    )
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+    ) -> None:
+        if model_config_dict is None:
+            model_config_dict = SiliconFlowConfig().as_dict()
+        api_key = api_key or os.environ.get("SILICONFLOW_API_KEY")
+        url = url or os.environ.get(
+            "SILICONFLOW_API_BASE_URL",
+            "https://api.siliconflow.cn/v1/",
+        )
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter
+        )
+        self._client = OpenAI(
+            timeout=180,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )
+
+    def run(
+        self,
+        messages: List[OpenAIMessage],
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        r"""Runs inference of SiliconFlow chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `Stream[ChatCompletionChunk]` in the stream mode.
+        """
+        response = self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **self.model_config_dict,
+        )
+        return response
+
+    @property
+    def token_counter(self) -> BaseTokenCounter:
+        r"""Initialize the token counter for the model backend.
+
+        Returns:
+            BaseTokenCounter: The token counter following the model's
+                tokenization style.
+        """
+        if not self._token_counter:
+            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
+        return self._token_counter
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any
+        unexpected arguments to SiliconFlow API.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to SiliconFlow API.
+        """
+        for param in self.model_config_dict:
+            if param not in SILICONFLOW_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into SiliconFlow model backend."
+                )
+
+    @property
+    def stream(self) -> bool:
+        r"""Returns whether the model is in stream mode, which sends partial
+        results each time.
+
+        Returns:
+            bool: Whether the model is in stream mode.
+        """
+        return self.model_config_dict.get('stream', False)
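check_model_config rejects any key outside SILICONFLOW_API_PARAMS, so misspelled config keys fail fast instead of being forwarded to the API. A sketch, assuming "max_tokens" is among the parameters accepted in siliconflow_config.py and using an illustrative model id:

from camel.models.siliconflow_model import SiliconFlowModel

model = SiliconFlowModel(
    model_type="deepseek-ai/DeepSeek-V3",  # illustrative model id
    model_config_dict={"max_tokens": 256},
    api_key="sk-...",  # placeholder
)
model.check_model_config()  # passes

model.model_config_dict["max_tokns"] = 1  # misspelled key
model.check_model_config()  # raises ValueError: Unexpected argument `max_tokns` ...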
camel/toolkits/__init__.py

@@ -45,6 +45,7 @@ from .human_toolkit import HumanToolkit
 from .stripe_toolkit import StripeToolkit
 from .video_toolkit import VideoDownloaderToolkit
 from .dappier_toolkit import DappierToolkit
+from .semantic_scholar_toolkit import SemanticScholarToolkit
 
 __all__ = [
     'BaseToolkit',
@@ -77,4 +78,5 @@ __all__ = [
     'MeshyToolkit',
     'OpenBBToolkit',
     'DappierToolkit',
+    'SemanticScholarToolkit',
 ]
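With the export in place, the new toolkit is importable from the package root like any other. A hedged sketch (get_tools and get_function_name are the standard BaseToolkit/FunctionTool interface; the toolkit's own methods live in semantic_scholar_toolkit.py, not shown here):

from camel.toolkits import SemanticScholarToolkit

toolkit = SemanticScholarToolkit()
for tool in toolkit.get_tools():  # FunctionTool wrappers
    print(tool.get_function_name())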
camel/toolkits/search_toolkit.py

@@ -13,10 +13,9 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
 import xml.etree.ElementTree as ET
-from typing import Any, Dict, List, Literal, Optional, Type, TypeAlias, Union
+from typing import Any, Dict, List, Literal, Optional, TypeAlias, Union
 
 import requests
-from pydantic import BaseModel
 
 from camel.toolkits.base import BaseToolkit
 from camel.toolkits.function_tool import FunctionTool
@@ -77,7 +76,7 @@ class SearchToolkit(BaseToolkit):
         output_type: Literal[
             "searchResults", "sourcedAnswer", "structured"
         ] = "searchResults",
-        structured_output_schema: Union[Type[BaseModel], str, None] = None,
+        structured_output_schema: Optional[str] = None,
     ) -> Dict[str, Any]:
         r"""Search for a query in the Linkup API and return results in various
         formats.
@@ -92,9 +91,9 @@ class SearchToolkit(BaseToolkit):
                 - "searchResults" for raw search results,
                 - "sourcedAnswer" for an answer with supporting sources,
                 - "structured" for output based on a provided schema.
-            structured_output_schema (Union[Type[BaseModel], str, None]): If
-                `output_type` is "structured",specify the schema of the
-                output. Can be a Pydantic BaseModel or a JSON schema string.
+            structured_output_schema (Optional[str]): If `output_type` is
+                "structured", specify the schema of the output. Must be a
+                string representing a valid object JSON schema.
 
         Returns:
             Dict[str, Any]: A dictionary representing the search result. The
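Callers that previously passed a Pydantic model must now serialize the schema first. A hedged sketch (the surrounding method appears to be SearchToolkit's Linkup search; the method name, query, and schema below are illustrative):

import json
from camel.toolkits import SearchToolkit

schema = json.dumps({
    "type": "object",
    "properties": {
        "title": {"type": "string"},
        "year": {"type": "integer"},
    },
})
# Pydantic users can still get here via json.dumps(Model.model_json_schema()).

result = SearchToolkit().search_linkup(  # assumed method name
    query="When was the CAMEL paper published?",
    output_type="structured",
    structured_output_schema=schema,
)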
@@ -581,6 +580,18 @@ class SearchToolkit(BaseToolkit):
                     "image_url": image_url,
                 }
 
+                # For Results pod, collect all plaintext values from subpods
+                if pod.get("@title") == "Results":
+                    results_text = []
+                    if isinstance(subpod_data, list):
+                        for subpod in subpod_data:
+                            if subpod.get("plaintext"):
+                                results_text.append(subpod["plaintext"])
+                    else:
+                        if description:
+                            results_text.append(description)
+                    pod_info["description"] = "\n".join(results_text)
+
                 # Add to steps list
                 output["pod_info"].append(pod_info)
 
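The Wolfram|Alpha JSON API returns a pod's subpods either as a single dict or as a list of dicts, which is why the added branch checks isinstance. A standalone sketch of the same aggregation, with hypothetical subpod data:

from typing import Any, Dict, List, Union

def collect_results_text(
    subpod_data: Union[Dict[str, Any], List[Dict[str, Any]]],
    description: str,
) -> str:
    # Mirrors the Results-pod handling above: join every subpod's
    # plaintext, falling back to the single description otherwise.
    results_text: List[str] = []
    if isinstance(subpod_data, list):
        for subpod in subpod_data:
            if subpod.get("plaintext"):
                results_text.append(subpod["plaintext"])
    elif description:
        results_text.append(description)
    return "\n".join(results_text)

# e.g. solving x^2 = 4 yields two Results subpods:
print(collect_results_text(
    [{"plaintext": "x = -2"}, {"plaintext": "x = 2"}],
    description="",
))  # -> "x = -2\nx = 2"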