camel-ai 0.2.38__py3-none-any.whl → 0.2.39__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +4 -0
- camel/agents/repo_agent.py +2 -2
- camel/benchmarks/apibank.py +1 -1
- camel/benchmarks/apibench.py +1 -1
- camel/configs/__init__.py +3 -0
- camel/configs/modelscope_config.py +59 -0
- camel/datagen/self_improving_cot.py +1 -1
- camel/datasets/__init__.py +2 -0
- camel/datasets/base_generator.py +22 -9
- camel/datasets/few_shot_generator.py +2 -3
- camel/datasets/self_instruct_generator.py +415 -0
- camel/embeddings/openai_compatible_embedding.py +13 -5
- camel/environments/models.py +1 -1
- camel/environments/single_step.py +155 -89
- camel/interpreters/docker_interpreter.py +1 -1
- camel/interpreters/internal_python_interpreter.py +1 -1
- camel/loaders/unstructured_io.py +2 -1
- camel/memories/blocks/chat_history_block.py +1 -1
- camel/memories/context_creators/score_based.py +2 -2
- camel/models/__init__.py +2 -0
- camel/models/model_factory.py +119 -0
- camel/models/modelscope_model.py +208 -0
- camel/models/openai_audio_models.py +2 -2
- camel/models/openai_model.py +49 -2
- camel/models/togetherai_model.py +2 -2
- camel/models/vllm_model.py +1 -1
- camel/models/zhipuai_model.py +2 -2
- camel/retrievers/vector_retriever.py +1 -1
- camel/storages/graph_storages/neo4j_graph.py +1 -1
- camel/storages/vectordb_storages/base.py +2 -2
- camel/storages/vectordb_storages/milvus.py +2 -2
- camel/storages/vectordb_storages/qdrant.py +2 -2
- camel/tasks/task.py +2 -2
- camel/toolkits/__init__.py +4 -1
- camel/toolkits/arxiv_toolkit.py +2 -1
- camel/toolkits/ask_news_toolkit.py +11 -3
- camel/toolkits/audio_analysis_toolkit.py +2 -0
- camel/toolkits/base.py +3 -0
- camel/toolkits/code_execution.py +3 -1
- camel/toolkits/dappier_toolkit.py +2 -1
- camel/toolkits/data_commons_toolkit.py +2 -0
- camel/toolkits/excel_toolkit.py +2 -0
- camel/toolkits/file_write_toolkit.py +2 -0
- camel/toolkits/github_toolkit.py +6 -4
- camel/toolkits/google_scholar_toolkit.py +2 -0
- camel/toolkits/human_toolkit.py +17 -1
- camel/toolkits/image_analysis_toolkit.py +2 -0
- camel/toolkits/linkedin_toolkit.py +2 -1
- camel/toolkits/math_toolkit.py +2 -0
- camel/toolkits/mcp_toolkit.py +42 -52
- camel/toolkits/meshy_toolkit.py +20 -2
- camel/toolkits/networkx_toolkit.py +2 -0
- camel/toolkits/notion_toolkit.py +7 -0
- camel/toolkits/openbb_toolkit.py +2 -1
- camel/toolkits/pubmed_toolkit.py +2 -0
- camel/toolkits/reddit_toolkit.py +2 -1
- camel/toolkits/retrieval_toolkit.py +2 -1
- camel/toolkits/search_toolkit.py +2 -1
- camel/toolkits/semantic_scholar_toolkit.py +2 -0
- camel/toolkits/slack_toolkit.py +2 -0
- camel/toolkits/stripe_toolkit.py +2 -1
- camel/toolkits/sympy_toolkit.py +2 -0
- camel/toolkits/terminal_toolkit.py +2 -0
- camel/toolkits/twitter_toolkit.py +2 -1
- camel/toolkits/video_analysis_toolkit.py +2 -1
- camel/toolkits/video_download_toolkit.py +2 -1
- camel/toolkits/weather_toolkit.py +2 -0
- camel/toolkits/whatsapp_toolkit.py +2 -1
- camel/toolkits/zapier_toolkit.py +2 -1
- camel/types/enums.py +65 -0
- camel/types/unified_model_type.py +5 -0
- camel/utils/__init__.py +2 -0
- camel/utils/chunker/code_chunker.py +9 -9
- camel/utils/commons.py +50 -30
- camel/utils/constants.py +2 -2
- camel/utils/mcp.py +79 -0
- camel/verifiers/__init__.py +2 -0
- camel/verifiers/base.py +15 -15
- camel/verifiers/math_verifier.py +182 -0
- camel/verifiers/python_verifier.py +18 -26
- {camel_ai-0.2.38.dist-info → camel_ai-0.2.39.dist-info}/METADATA +3 -1
- {camel_ai-0.2.38.dist-info → camel_ai-0.2.39.dist-info}/RECORD +85 -80
- {camel_ai-0.2.38.dist-info → camel_ai-0.2.39.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.38.dist-info → camel_ai-0.2.39.dist-info}/licenses/LICENSE +0 -0
camel/models/modelscope_model.py
ADDED
@@ -0,0 +1,208 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import os
+from typing import Any, Dict, List, Optional, Type, Union
+
+from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+from pydantic import BaseModel
+
+from camel.configs import MODELSCOPE_API_PARAMS, ModelScopeConfig
+from camel.messages import OpenAIMessage
+from camel.models import BaseModelBackend
+from camel.models._utils import try_modify_message_with_format
+from camel.types import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    ModelType,
+)
+from camel.utils import (
+    BaseTokenCounter,
+    OpenAITokenCounter,
+    api_keys_required,
+)
+
+
+class ModelScopeModel(BaseModelBackend):
+    r"""ModelScope API in a unified BaseModelBackend interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created, one of ModelScope series.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into:obj:`openai.ChatCompletion.create()`. If
+            :obj:`None`, :obj:`ModelScopeConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The MODELSCOPE_SDK_TOKEN for
+            authenticating with the ModelScope service. (default: :obj:`None`)
+            refer to the following link for more details:
+            https://modelscope.cn/my/myaccesstoken
+        url (Optional[str], optional): The url to the ModelScope service.
+            (default: :obj:`https://api-inference.modelscope.cn/v1/`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+            (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
+    """
+
+    @api_keys_required(
+        [
+            ("api_key", 'MODELSCOPE_SDK_TOKEN'),
+        ]
+    )
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
+    ) -> None:
+        if model_config_dict is None:
+            model_config_dict = ModelScopeConfig().as_dict()
+        api_key = api_key or os.environ.get("MODELSCOPE_SDK_TOKEN")
+        url = url or os.environ.get(
+            "MODELSCOPE_API_BASE_URL",
+            "https://api-inference.modelscope.cn/v1/",
+        )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter, timeout
+        )
+        self._client = OpenAI(
+            timeout=self._timeout,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )
+        self._async_client = AsyncOpenAI(
+            timeout=self._timeout,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )
+
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs inference of ModelScope chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
+
+        response = await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+        return response
+
+    def _run(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        r"""Runs inference of ModelScope chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `Stream[ChatCompletionChunk]` in the stream mode.
+        """
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
+
+        response = self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+        return response
+
+    def _prepare_request(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Dict[str, Any]:
+        request_config = self.model_config_dict.copy()
+        if tools:
+            request_config["tools"] = tools
+        elif response_format:
+            try_modify_message_with_format(messages[-1], response_format)
+            request_config["response_format"] = {"type": "json_object"}
+
+        return request_config
+
+    @property
+    def token_counter(self) -> BaseTokenCounter:
+        r"""Initialize the token counter for the model backend.
+
+        Returns:
+            OpenAITokenCounter: The token counter following the model's
+                tokenization style.
+        """
+
+        if not self._token_counter:
+            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
+        return self._token_counter
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any
+        unexpected arguments to ModelScope API.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to ModelScope API.
+        """
+        for param in self.model_config_dict:
+            if param not in MODELSCOPE_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into ModelScope model backend."
+                )
+
+    @property
+    def stream(self) -> bool:
+        r"""Returns whether the model is in stream mode, which sends partial
+        results each time.
+
+        Returns:
+            bool: Whether the model is in stream mode.
+        """
+        return self.model_config_dict.get('stream', False)

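For orientation, here is a minimal usage sketch of the new backend. It is not part of the diff: the model id, the token value, and the direct call to the private `_run` method are illustrative assumptions.

```python
import os

from camel.models.modelscope_model import ModelScopeModel

# __init__ falls back to the MODELSCOPE_SDK_TOKEN environment variable
# when api_key is not passed; the value below is a placeholder.
os.environ["MODELSCOPE_SDK_TOKEN"] = "<your-token>"

# Hypothetical ModelScope model id, not taken from this diff.
model = ModelScopeModel(model_type="Qwen/Qwen2.5-7B-Instruct")

# _run copies model_config_dict via _prepare_request and forwards it to
# chat.completions.create; with stream=False it returns a ChatCompletion.
response = model._run(messages=[{"role": "user", "content": "Hello!"}])
print(response.choices[0].message.content)
```
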
camel/models/openai_audio_models.py
CHANGED
@@ -78,8 +78,8 @@ class OpenAIAudioModels(BaseAudioModel):
         Returns:
             Union[List[_legacy_response.HttpxBinaryResponseContent],
                 _legacy_response.HttpxBinaryResponseContent]: List of response
-                content object from OpenAI if input
-                single response content if input
+                content object from OpenAI if input characters more than 4096,
+                single response content if input characters less than 4096.
 
         Raises:
             Exception: If there's an error during the TTS API call.

camel/models/openai_model.py
CHANGED
@@ -104,7 +104,7 @@ class OpenAIModel(BaseModelBackend):
         )
 
     def _sanitize_config(self, config_dict: Dict[str, Any]) -> Dict[str, Any]:
-        """Sanitize the model configuration for O1 models."""
+        r"""Sanitize the model configuration for O1 models."""
 
         if self.model_type in [
             ModelType.O1,
@@ -113,7 +113,7 @@ class OpenAIModel(BaseModelBackend):
             ModelType.O3_MINI,
         ]:
             warnings.warn(
-                "Warning: You are using an
+                "Warning: You are using an reasoning model (O1 or O3), "
                 "which has certain limitations, reference: "
                 "`https://platform.openai.com/docs/guides/reasoning`.",
                 UserWarning,
@@ -125,6 +125,52 @@ class OpenAIModel(BaseModelBackend):
         }
         return config_dict
 
+    def _adapt_messages_for_o1_models(
+        self, messages: List[OpenAIMessage]
+    ) -> List[OpenAIMessage]:
+        r"""Adjust message roles to comply with O1 model requirements by
+        converting 'system' or 'developer' to 'user' role.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            processed_messages (List[OpenAIMessage]): Return a new list of
+                messages to avoid mutating input.
+        """
+
+        # Define supported O1 model types as a class constant would be better
+        O1_MODEL_TYPES = {ModelType.O1_MINI, ModelType.O1_PREVIEW}
+
+        if self.model_type not in O1_MODEL_TYPES:
+            return messages.copy()
+
+        # Issue warning only once using class state
+        if not hasattr(self, "_o1_warning_issued"):
+            warnings.warn(
+                "O1 models (O1_MINI/O1_PREVIEW) have role limitations: "
+                "System or Developer messages will be converted to user role."
+                "Reference: https://community.openai.com/t/"
+                "developer-role-not-accepted-for-o1-o1-mini-o3-mini/1110750/7",
+                UserWarning,
+                stacklevel=2,
+            )
+            self._o1_warning_issued = True
+
+        # Create new message list to avoid mutating input
+        processed_messages = []
+        for message in messages:
+            processed_message = message.copy()
+            if (
+                processed_message["role"] == "system"
+                or processed_message["role"] == "developer"
+            ):
+                processed_message["role"] = "user"  # type: ignore[arg-type]
+            processed_messages.append(processed_message)
+
+        return processed_messages
+
     @property
     def token_counter(self) -> BaseTokenCounter:
         r"""Initialize the token counter for the model backend.
@@ -158,6 +204,7 @@ class OpenAIModel(BaseModelBackend):
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
+        messages = self._adapt_messages_for_o1_models(messages)
         response_format = response_format or self.model_config_dict.get(
             "response_format", None
         )

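The role-conversion logic added above is easy to check in isolation. Below is a self-contained sketch of the same transformation (plain dicts instead of OpenAIMessage, outside the class so it runs without a model backend):

```python
from typing import Dict, List

def adapt_for_o1(messages: List[Dict[str, str]]) -> List[Dict[str, str]]:
    # Mirrors _adapt_messages_for_o1_models: copy each message and downgrade
    # 'system'/'developer' roles to 'user', leaving the caller's list intact.
    processed = []
    for message in messages:
        msg = message.copy()
        if msg["role"] in ("system", "developer"):
            msg["role"] = "user"
        processed.append(msg)
    return processed

history = [
    {"role": "system", "content": "Answer tersely."},
    {"role": "user", "content": "Hi"},
]
print(adapt_for_o1(history))
# [{'role': 'user', 'content': 'Answer tersely.'},
#  {'role': 'user', 'content': 'Hi'}]
```
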
camel/models/togetherai_model.py
CHANGED
@@ -113,7 +113,7 @@ class TogetherAIModel(BaseModelBackend):
                 `ChatCompletion` in the non-stream mode, or
                 `AsyncStream[ChatCompletionChunk]` in the stream mode.
         """
-        # Use OpenAI
+        # Use OpenAI client as interface call Together AI
         # Reference: https://docs.together.ai/docs/openai-api-compatibility
         response = await self._async_client.chat.completions.create(
             messages=messages,
@@ -139,7 +139,7 @@ class TogetherAIModel(BaseModelBackend):
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
-        # Use OpenAI
+        # Use OpenAI client as interface call Together AI
         # Reference: https://docs.together.ai/docs/openai-api-compatibility
         response = self._client.chat.completions.create(
             messages=messages,

camel/models/vllm_model.py
CHANGED
camel/models/zhipuai_model.py
CHANGED
@@ -111,7 +111,7 @@ class ZhipuAIModel(BaseModelBackend):
                 `ChatCompletion` in the non-stream mode, or
                 `AsyncStream[ChatCompletionChunk]` in the stream mode.
         """
-        # Use OpenAI
+        # Use OpenAI client as interface call ZhipuAI
         # Reference: https://open.bigmodel.cn/dev/api#openai_sdk
         response = await self._async_client.chat.completions.create(
             messages=messages,
@@ -137,7 +137,7 @@ class ZhipuAIModel(BaseModelBackend):
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
-        # Use OpenAI
+        # Use OpenAI client as interface call ZhipuAI
         # Reference: https://open.bigmodel.cn/dev/api#openai_sdk
         response = self._client.chat.completions.create(
             messages=messages,

camel/retrievers/vector_retriever.py
CHANGED
@@ -224,7 +224,7 @@ class VectorRetriever(BaseRetriever):
         if top_k <= 0:
             raise ValueError("top_k must be a positive integer.")
 
-        # Load the storage
+        # Load the storage in case it's hosted remote
         self.storage.load()
 
         query_vector = self.embedding_model.embed(obj=query)

camel/storages/graph_storages/neo4j_graph.py
CHANGED
@@ -69,7 +69,7 @@ class Neo4jGraph(BaseGraphStorage):
     The detailed information about Neo4j is available at:
     `Neo4j https://neo4j.com/docs/getting-started`
 
-    This module
+    This module referred to the work of Langchian and Llamaindex.
 
     Args:
         url (str): The URL of the Neo4j database server.

camel/storages/vectordb_storages/base.py
CHANGED
@@ -100,7 +100,7 @@ class VectorDBStatus(BaseModel):
     r"""Vector database status.
 
     Attributes:
-        vector_dim (int): The
+        vector_dim (int): The dimension of stored vectors.
         vector_count (int): The number of stored vectors.
 
     """
@@ -204,7 +204,7 @@ class BaseVectorStorage(ABC):
 
         Args:
             vector (List[float]): The search vector.
-            top_k (int): The number of top
+            top_k (int): The number of top similar vectors.
 
         Returns:
             List[List[Dict[str, Any]]]: A list of vector payloads retrieved

camel/storages/vectordb_storages/milvus.py
CHANGED
@@ -36,7 +36,7 @@ class MilvusStorage(BaseVectorStorage):
     `Milvus <https://milvus.io/docs/overview.md/>`_
 
     Args:
-        vector_dim (int): The
+        vector_dim (int): The dimension of storing vectors.
         url_and_api_key (Tuple[str, str]): Tuple containing
             the URL and API key for connecting to a remote Milvus instance.
             URL maps to Milvus uri concept, typically "endpoint:port".
@@ -136,7 +136,7 @@ class MilvusStorage(BaseVectorStorage):
         schema.add_field(
             field_name="id",
             datatype=DataType.VARCHAR,
-
+            description='A unique identifier for the vector',
             is_primary=True,
             max_length=65535,
         )

camel/storages/vectordb_storages/qdrant.py
CHANGED
@@ -40,7 +40,7 @@ class QdrantStorage(BaseVectorStorage):
     `Qdrant <https://qdrant.tech/>`_
 
     Args:
-        vector_dim (int): The
+        vector_dim (int): The dimension of storing vectors.
         collection_name (Optional[str], optional): Name for the collection in
             the Qdrant. If not provided, set it to the current time with iso
             format. (default: :obj:`None`)
@@ -218,7 +218,7 @@ class QdrantStorage(BaseVectorStorage):
         )
 
     def _collection_exists(self, collection_name: str) -> bool:
-        r"""Returns
+        r"""Returns whether the collection exists in the database"""
         for c in self._client.get_collections().collections:
             if collection_name == c.name:
                 return True

camel/tasks/task.py
CHANGED
@@ -178,7 +178,7 @@ class Task(BaseModel):
         return None
 
     def to_string(self, indent: str = "", state: bool = False) -> str:
-        r"""Convert task to a
+        r"""Convert task to a string.
 
         Args:
             indent (str): The ident for hierarchical tasks.
@@ -196,7 +196,7 @@ class Task(BaseModel):
         return _str
 
     def get_result(self, indent: str = "") -> str:
-        r"""Get task result to a
+        r"""Get task result to a string.
 
         Args:
             indent (str): The ident for hierarchical tasks.

camel/toolkits/__init__.py
CHANGED
@@ -37,6 +37,7 @@ from .github_toolkit import GithubToolkit
 from .google_scholar_toolkit import GoogleScholarToolkit
 from .arxiv_toolkit import ArxivToolkit
 from .slack_toolkit import SlackToolkit
+from .whatsapp_toolkit import WhatsAppToolkit
 from .twitter_toolkit import TwitterToolkit
 from .open_api_toolkit import OpenAPIToolkit
 from .retrieval_toolkit import RetrievalToolkit
@@ -60,6 +61,7 @@ from .browser_toolkit import BrowserToolkit
 from .file_write_toolkit import FileWriteToolkit
 from .terminal_toolkit import TerminalToolkit
 from .pubmed_toolkit import PubMedToolkit
+from .data_commons_toolkit import DataCommonsToolkit
 from .thinking_toolkit import ThinkingToolkit
 from .openai_agent_toolkit import OpenAIAgentToolkit
 from .searxng_toolkit import SearxNGToolkit
@@ -77,6 +79,7 @@ __all__ = [
     'GoogleMapsToolkit',
     'SearchToolkit',
     'SlackToolkit',
+    'WhatsAppToolkit',
     'DalleToolkit',
     'TwitterToolkit',
     'WeatherToolkit',
@@ -103,7 +106,6 @@ __all__ = [
     'MinerUToolkit',
     'MemoryToolkit',
     'MCPToolkit',
-    'MCPToolkitManager',
     'AudioAnalysisToolkit',
     'ExcelToolkit',
     'VideoAnalysisToolkit',
@@ -112,6 +114,7 @@ __all__ = [
     'FileWriteToolkit',
     'TerminalToolkit',
     'PubMedToolkit',
+    'DataCommonsToolkit',
     'ThinkingToolkit',
     'OpenAIAgentToolkit',
     'SearxNGToolkit',

camel/toolkits/arxiv_toolkit.py
CHANGED
@@ -17,11 +17,12 @@ from typing import Dict, Generator, List, Optional
 from camel.logger import get_logger
 from camel.toolkits.base import BaseToolkit
 from camel.toolkits.function_tool import FunctionTool
-from camel.utils import dependencies_required
+from camel.utils import MCPServer, dependencies_required
 
 logger = get_logger(__name__)
 
 
+@MCPServer()
 class ArxivToolkit(BaseToolkit):
     r"""A toolkit for interacting with the arXiv API to search and download
     academic papers.

camel/toolkits/ask_news_toolkit.py
CHANGED
@@ -17,6 +17,7 @@ from typing import List, Literal, Optional, Tuple, Union
 
 from camel.toolkits import FunctionTool
 from camel.toolkits.base import BaseToolkit
+from camel.utils import MCPServer, api_keys_required
 
 
 def _process_response(
@@ -55,6 +56,10 @@ def _process_response(
         raise ValueError(f"Invalid return_type: {return_type}")
 
 
+@api_keys_required(
+    [(None, "ASKNEWS_CLIENT_ID"), (None, "ASKNEWS_CLIENT_SECRET")]
+)
+@MCPServer()
 class AskNewsToolkit(BaseToolkit):
     r"""A class representing a toolkit for interacting with the AskNews API.
 
@@ -62,7 +67,10 @@ class AskNewsToolkit(BaseToolkit):
     based on user queries using the AskNews API.
     """
 
-    def __init__(
+    def __init__(
+        self,
+        timeout: Optional[float] = None,
+    ):
         r"""Initialize the AskNewsToolkit with API clients.The API keys and
         credentials are retrieved from environment variables.
         """
@@ -152,7 +160,7 @@ class AskNewsToolkit(BaseToolkit):
             each update. (default: :obj:`10`)
 
         Returns:
-
+            Union[dict, str]: A dictionary containing the stories and their
                 associated data, or error message if the process fails.
         """
         try:
@@ -446,7 +454,7 @@ class AsyncAskNewsToolkit(BaseToolkit):
             each update. (default: :obj:`10`)
 
         Returns:
-
+            Union[dict, str]: A dictionary containing the stories and their
                 associated data, or error message if the process fails.
         """
         try:

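The `(None, "ASKNEWS_CLIENT_ID")` tuples above suggest a convention in `api_keys_required` where a `None` argument name means the key is resolved from the environment only. The real implementation lives in camel/utils/commons.py (changed in this release but not shown in this section); the following is only a sketch of that reading, with a hypothetical decorator name:

```python
import os
from functools import wraps
from typing import Callable, List, Optional, Tuple

def require_keys(key_specs: List[Tuple[Optional[str], str]]) -> Callable:
    # Hypothetical re-creation: when arg_name is None the key must come from
    # the environment; otherwise an explicit keyword argument also counts.
    def decorator(fn: Callable) -> Callable:
        @wraps(fn)
        def wrapper(*args, **kwargs):
            missing = [
                env_var
                for arg_name, env_var in key_specs
                if (arg_name is None or kwargs.get(arg_name) is None)
                and not os.environ.get(env_var)
            ]
            if missing:
                raise ValueError(f"Missing API keys: {', '.join(missing)}")
            return fn(*args, **kwargs)
        return wrapper
    return decorator
```
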
|
@@ -23,6 +23,7 @@ from camel.messages import BaseMessage
|
|
|
23
23
|
from camel.models import BaseAudioModel, BaseModelBackend, OpenAIAudioModels
|
|
24
24
|
from camel.toolkits.base import BaseToolkit
|
|
25
25
|
from camel.toolkits.function_tool import FunctionTool
|
|
26
|
+
from camel.utils import MCPServer
|
|
26
27
|
|
|
27
28
|
logger = get_logger(__name__)
|
|
28
29
|
|
|
@@ -80,6 +81,7 @@ def download_file(url: str, cache_dir: str) -> str:
|
|
|
80
81
|
return local_path
|
|
81
82
|
|
|
82
83
|
|
|
84
|
+
@MCPServer()
|
|
83
85
|
class AudioAnalysisToolkit(BaseToolkit):
|
|
84
86
|
r"""A toolkit for audio processing and analysis.
|
|
85
87
|
|
camel/toolkits/base.py
CHANGED
@@ -25,6 +25,9 @@ class BaseToolkit(metaclass=AgentOpsMeta):
         timeout (Optional[float]): The timeout for the toolkit.
     """
 
+    from mcp.server import FastMCP
+
+    mcp: FastMCP
     timeout: Optional[float] = None
 
     def __init__(self, timeout: Optional[float] = None):

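`BaseToolkit` now declares an `mcp: FastMCP` attribute, and toolkits throughout this release gain an `@MCPServer()` class decorator. The decorator itself is defined in the new camel/utils/mcp.py, which this section does not show, so the sketch below is only a guess at its shape: wrap `__init__`, attach a FastMCP server, and register each FunctionTool's underlying callable as an MCP tool.

```python
import functools

from mcp.server.fastmcp import FastMCP

def mcp_server_sketch(cls):
    # Hypothetical stand-in for camel.utils.MCPServer; the actual
    # implementation in camel/utils/mcp.py is not shown in this diff.
    original_init = cls.__init__

    @functools.wraps(original_init)
    def init_with_mcp(self, *args, **kwargs):
        original_init(self, *args, **kwargs)
        # Fill the `mcp: FastMCP` slot declared on BaseToolkit.
        self.mcp = FastMCP(cls.__name__)
        # BaseToolkit subclasses expose FunctionTool objects via get_tools();
        # each FunctionTool wraps a plain callable in its `func` attribute.
        for tool in self.get_tools():
            self.mcp.add_tool(tool.func)

    cls.__init__ = init_with_mcp
    return cls
```
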
camel/toolkits/code_execution.py
CHANGED
@@ -22,10 +22,12 @@ from camel.interpreters import (
 )
 from camel.toolkits import FunctionTool
 from camel.toolkits.base import BaseToolkit
+from camel.utils import MCPServer
 
 
+@MCPServer()
 class CodeExecutionToolkit(BaseToolkit):
-    r"""A
+    r"""A toolkit for code execution.
 
     Args:
         sandbox (str): The environment type used to execute code.

camel/toolkits/dappier_toolkit.py
CHANGED
@@ -16,9 +16,10 @@ from typing import Dict, List, Literal, Optional, Union
 
 from camel.toolkits.base import BaseToolkit
 from camel.toolkits.function_tool import FunctionTool
-from camel.utils import api_keys_required, dependencies_required
+from camel.utils import MCPServer, api_keys_required, dependencies_required
 
 
+@MCPServer()
 class DappierToolkit(BaseToolkit):
     r"""A class representing a toolkit for interacting with the Dappier API.
 

camel/toolkits/data_commons_toolkit.py
CHANGED
@@ -16,10 +16,12 @@ from typing import Any, Dict, List, Optional, Union
 
 from camel.toolkits import FunctionTool
 from camel.toolkits.base import BaseToolkit
+from camel.utils import MCPServer
 
 logger = logging.getLogger(__name__)
 
 
+@MCPServer()
 class DataCommonsToolkit(BaseToolkit):
     r"""A class representing a toolkit for Data Commons.
 

camel/toolkits/excel_toolkit.py
CHANGED
@@ -19,10 +19,12 @@ import pandas as pd
 from camel.logger import get_logger
 from camel.toolkits.base import BaseToolkit
 from camel.toolkits.function_tool import FunctionTool
+from camel.utils import MCPServer
 
 logger = get_logger(__name__)
 
 
+@MCPServer()
 class ExcelToolkit(BaseToolkit):
     r"""A class representing a toolkit for extract detailed cell information
     from an Excel file.

camel/toolkits/file_write_toolkit.py
CHANGED
@@ -21,6 +21,7 @@ from typing import List, Optional, Union
 from camel.logger import get_logger
 from camel.toolkits.base import BaseToolkit
 from camel.toolkits.function_tool import FunctionTool
+from camel.utils import MCPServer
 
 logger = get_logger(__name__)
 
@@ -28,6 +29,7 @@ logger = get_logger(__name__)
 DEFAULT_FORMAT = '.md'
 
 
+@MCPServer()
 class FileWriteToolkit(BaseToolkit):
     r"""A toolkit for creating, writing, and modifying text in files.
 