camel-ai 0.2.35__py3-none-any.whl → 0.2.37__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic. Click here for more details.
- camel/__init__.py +1 -1
- camel/agents/__init__.py +2 -0
- camel/agents/repo_agent.py +579 -0
- camel/configs/__init__.py +3 -0
- camel/configs/aiml_config.py +20 -19
- camel/configs/anthropic_config.py +25 -27
- camel/configs/cohere_config.py +11 -10
- camel/configs/deepseek_config.py +16 -16
- camel/configs/gemini_config.py +8 -8
- camel/configs/groq_config.py +18 -19
- camel/configs/internlm_config.py +8 -8
- camel/configs/litellm_config.py +26 -24
- camel/configs/mistral_config.py +8 -8
- camel/configs/moonshot_config.py +11 -11
- camel/configs/nvidia_config.py +13 -13
- camel/configs/ollama_config.py +14 -15
- camel/configs/openai_config.py +3 -3
- camel/configs/openrouter_config.py +106 -0
- camel/configs/qwen_config.py +8 -8
- camel/configs/reka_config.py +12 -11
- camel/configs/samba_config.py +14 -14
- camel/configs/sglang_config.py +15 -16
- camel/configs/siliconflow_config.py +18 -17
- camel/configs/togetherai_config.py +18 -19
- camel/configs/vllm_config.py +18 -19
- camel/configs/yi_config.py +7 -8
- camel/configs/zhipuai_config.py +8 -9
- camel/datasets/few_shot_generator.py +2 -5
- camel/datasets/static_dataset.py +25 -23
- camel/environments/models.py +3 -0
- camel/environments/single_step.py +212 -132
- camel/extractors/__init__.py +16 -1
- camel/memories/agent_memories.py +2 -1
- camel/memories/blocks/chat_history_block.py +2 -1
- camel/models/__init__.py +2 -0
- camel/models/gemini_model.py +36 -0
- camel/models/groq_model.py +6 -3
- camel/models/model_factory.py +3 -0
- camel/models/openrouter_model.py +204 -0
- camel/storages/__init__.py +2 -0
- camel/storages/key_value_storages/__init__.py +2 -0
- camel/storages/key_value_storages/mem0_cloud.py +224 -0
- camel/storages/vectordb_storages/qdrant.py +3 -3
- camel/toolkits/__init__.py +2 -0
- camel/toolkits/browser_toolkit.py +43 -0
- camel/toolkits/code_execution.py +2 -1
- camel/toolkits/mcp_toolkit.py +30 -1
- camel/toolkits/thinking_toolkit.py +74 -0
- camel/types/enums.py +27 -0
- camel/types/unified_model_type.py +5 -0
- camel/utils/chunker/code_chunker.py +9 -15
- camel/verifiers/__init__.py +1 -2
- camel/verifiers/base.py +159 -99
- camel/verifiers/models.py +0 -12
- camel/verifiers/python_verifier.py +316 -60
- {camel_ai-0.2.35.dist-info → camel_ai-0.2.37.dist-info}/METADATA +54 -5
- {camel_ai-0.2.35.dist-info → camel_ai-0.2.37.dist-info}/RECORD +59 -54
- {camel_ai-0.2.35.dist-info → camel_ai-0.2.37.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.35.dist-info → camel_ai-0.2.37.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,204 @@
|
|
|
1
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
2
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
3
|
+
# you may not use this file except in compliance with the License.
|
|
4
|
+
# You may obtain a copy of the License at
|
|
5
|
+
#
|
|
6
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
|
+
#
|
|
8
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
10
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
|
+
# See the License for the specific language governing permissions and
|
|
12
|
+
# limitations under the License.
|
|
13
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
14
|
+
import os
|
|
15
|
+
from typing import Any, Dict, List, Optional, Type, Union
|
|
16
|
+
|
|
17
|
+
from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
|
|
18
|
+
from pydantic import BaseModel
|
|
19
|
+
|
|
20
|
+
from camel.configs import OPENROUTER_API_PARAMS, OpenRouterConfig
|
|
21
|
+
from camel.messages import OpenAIMessage
|
|
22
|
+
from camel.models import BaseModelBackend
|
|
23
|
+
from camel.models._utils import try_modify_message_with_format
|
|
24
|
+
from camel.types import (
|
|
25
|
+
ChatCompletion,
|
|
26
|
+
ChatCompletionChunk,
|
|
27
|
+
ModelType,
|
|
28
|
+
)
|
|
29
|
+
from camel.utils import (
|
|
30
|
+
BaseTokenCounter,
|
|
31
|
+
OpenAITokenCounter,
|
|
32
|
+
api_keys_required,
|
|
33
|
+
)
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
class OpenRouterModel(BaseModelBackend):
    r"""LLM API served by OpenRouter in a unified BaseModelBackend interface.

    Args:
        model_type (Union[ModelType, str]): Model for which a backend is
            created.
        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
            that will be fed into :obj:`openai.ChatCompletion.create()`.
            If :obj:`None`, :obj:`OpenRouterConfig().as_dict()` will be used.
            (default: :obj:`None`)
        api_key (Optional[str], optional): The API key for authenticating
            with the OpenRouter service. (default: :obj:`None`).
        url (Optional[str], optional): The url to the OpenRouter service.
            (default: :obj:`None`)
        token_counter (Optional[BaseTokenCounter], optional): Token counter to
            use for the model. If not provided, :obj:`OpenAITokenCounter(
            ModelType.GPT_4O_MINI)` will be used.
            (default: :obj:`None`)
    """

    @api_keys_required([("api_key", "OPENROUTER_API_KEY")])
    def __init__(
        self,
        model_type: Union[ModelType, str],
        model_config_dict: Optional[Dict[str, Any]] = None,
        api_key: Optional[str] = None,
        url: Optional[str] = None,
        token_counter: Optional[BaseTokenCounter] = None,
    ) -> None:
        if model_config_dict is None:
            model_config_dict = OpenRouterConfig().as_dict()
        api_key = api_key or os.environ.get("OPENROUTER_API_KEY")
        # Fall back to the public OpenRouter endpoint when no URL is given.
        url = url or os.environ.get(
            "OPENROUTER_API_BASE_URL", "https://openrouter.ai/api/v1"
        )
        super().__init__(
            model_type, model_config_dict, api_key, url, token_counter
        )
        self._client = OpenAI(
            timeout=180,
            max_retries=3,
            api_key=self._api_key,
            base_url=self._url,
        )
        self._async_client = AsyncOpenAI(
            timeout=180,
            max_retries=3,
            api_key=self._api_key,
            base_url=self._url,
        )

    @property
    def token_counter(self) -> BaseTokenCounter:
        r"""Initialize the token counter for the model backend.

        Returns:
            BaseTokenCounter: The token counter following the model's
                tokenization style.
        """
        if not self._token_counter:
            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
        return self._token_counter

    def _prepare_request(
        self,
        messages: List[OpenAIMessage],
        response_format: Optional[Type[BaseModel]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
    ) -> Dict[str, Any]:
        r"""Build the per-request config from the base model config.

        Tools take precedence over a structured response format: when both
        are supplied, only ``tools`` is forwarded.

        Args:
            messages (List[OpenAIMessage]): Message list in OpenAI API format.
            response_format (Optional[Type[BaseModel]]): Desired structured
                output schema. (default: :obj:`None`)
            tools (Optional[List[Dict[str, Any]]]): Tool schemas for the
                request. (default: :obj:`None`)

        Returns:
            Dict[str, Any]: Keyword arguments for the completion call.
        """
        request_config = self.model_config_dict.copy()
        if tools:
            request_config["tools"] = tools
        elif response_format:
            # Inject formatting instructions into the last message and ask
            # the API for JSON output.
            try_modify_message_with_format(messages[-1], response_format)
            request_config["response_format"] = {"type": "json_object"}

        return request_config

    def _run(
        self,
        messages: List[OpenAIMessage],
        response_format: Optional[Type[BaseModel]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
        r"""Runs inference of OpenAI chat completion.

        Args:
            messages (List[OpenAIMessage]): Message list with the chat history
                in OpenAI API format.
            response_format (Optional[Type[BaseModel]]): The format of the
                response.
            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
                use for the request.

        Returns:
            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                `ChatCompletion` in the non-stream mode, or
                `Stream[ChatCompletionChunk]` in the stream mode.
        """
        request_config = self._prepare_request(
            messages, response_format, tools
        )

        response = self._client.chat.completions.create(
            messages=messages,
            model=self.model_type,
            **request_config,
        )

        return response

    async def _arun(
        self,
        messages: List[OpenAIMessage],
        response_format: Optional[Type[BaseModel]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
        r"""Runs inference of OpenRouter chat completion asynchronously.

        Args:
            messages (List[OpenAIMessage]): Message list with the chat history
                in OpenAI API format.
            response_format (Optional[Type[BaseModel]]): The format of the
                response.
            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
                use for the request.

        Returns:
            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
                `ChatCompletion` in the non-stream mode, or
                `AsyncStream[ChatCompletionChunk]` in the stream mode.
        """
        request_config = self._prepare_request(
            messages, response_format, tools
        )

        response = await self._async_client.chat.completions.create(
            messages=messages,
            model=self.model_type,
            **request_config,
        )

        return response

    def check_model_config(self):
        r"""Check whether the model configuration contains any unexpected
        arguments to OpenRouter API. But OpenRouter API does not have any
        additional arguments to check.

        Raises:
            ValueError: If the model configuration dictionary contains any
                unexpected arguments to OpenRouter API.
        """
        for param in self.model_config_dict:
            if param not in OPENROUTER_API_PARAMS:
                raise ValueError(
                    f"Unexpected argument `{param}` is "
                    "input into OpenRouter model backend."
                )

    @property
    def stream(self) -> bool:
        r"""Returns whether the model is in stream mode, which sends partial
        results each time.

        Returns:
            bool: Whether the model is in stream mode.
        """
        return self.model_config_dict.get("stream", False)
|
camel/storages/__init__.py
CHANGED
|
@@ -18,6 +18,7 @@ from .graph_storages.neo4j_graph import Neo4jGraph
|
|
|
18
18
|
from .key_value_storages.base import BaseKeyValueStorage
|
|
19
19
|
from .key_value_storages.in_memory import InMemoryKeyValueStorage
|
|
20
20
|
from .key_value_storages.json import JsonStorage
|
|
21
|
+
from .key_value_storages.mem0_cloud import Mem0Storage
|
|
21
22
|
from .key_value_storages.redis import RedisStorage
|
|
22
23
|
from .vectordb_storages.base import (
|
|
23
24
|
BaseVectorStorage,
|
|
@@ -42,4 +43,5 @@ __all__ = [
|
|
|
42
43
|
'BaseGraphStorage',
|
|
43
44
|
'Neo4jGraph',
|
|
44
45
|
'NebulaGraph',
|
|
46
|
+
'Mem0Storage',
|
|
45
47
|
]
|
|
@@ -15,6 +15,7 @@
|
|
|
15
15
|
from .base import BaseKeyValueStorage
|
|
16
16
|
from .in_memory import InMemoryKeyValueStorage
|
|
17
17
|
from .json import CamelJSONEncoder, JsonStorage
|
|
18
|
+
from .mem0_cloud import Mem0Storage
|
|
18
19
|
from .redis import RedisStorage
|
|
19
20
|
|
|
20
21
|
__all__ = [
|
|
@@ -23,4 +24,5 @@ __all__ = [
|
|
|
23
24
|
'JsonStorage',
|
|
24
25
|
'RedisStorage',
|
|
25
26
|
'CamelJSONEncoder',
|
|
27
|
+
'Mem0Storage',
|
|
26
28
|
]
|
|
@@ -0,0 +1,224 @@
|
|
|
1
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
2
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
3
|
+
# you may not use this file except in compliance with the License.
|
|
4
|
+
# You may obtain a copy of the License at
|
|
5
|
+
#
|
|
6
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
|
+
#
|
|
8
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
10
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
|
+
# See the License for the specific language governing permissions and
|
|
12
|
+
# limitations under the License.
|
|
13
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
14
|
+
|
|
15
|
+
import logging
|
|
16
|
+
import os
|
|
17
|
+
from datetime import datetime
|
|
18
|
+
from typing import Any, Dict, List, Optional
|
|
19
|
+
from uuid import UUID
|
|
20
|
+
|
|
21
|
+
from camel.memories.records import MemoryRecord
|
|
22
|
+
from camel.messages import BaseMessage
|
|
23
|
+
from camel.storages.key_value_storages import BaseKeyValueStorage
|
|
24
|
+
from camel.types import OpenAIBackendRole, RoleType
|
|
25
|
+
|
|
26
|
+
logger = logging.getLogger(__name__)
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class Mem0Storage(BaseKeyValueStorage):
    r"""A concrete implementation of the :obj:`BaseKeyValueStorage` using Mem0
    as the backend. This storage system uses Mem0's text capabilities to store,
    search, and manage text with context.

    Args:
        agent_id (str): Default agent ID to associate memories with.
        api_key (str, optional): The API key for authentication. If not
            provided, will try to get from environment variable MEM0_API_KEY
            (default: :obj:`None`).
        user_id (str, optional): Default user ID to associate memories with
            (default: :obj:`None`).
        metadata (Dict[str, Any], optional): Default metadata to include with
            all memories (default: :obj:`None`).

    References:
        https://docs.mem0.ai
    """

    def __init__(
        self,
        agent_id: str,
        api_key: Optional[str] = None,
        user_id: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> None:
        # mem0 is an optional dependency; fail with an actionable message.
        try:
            from mem0 import MemoryClient
        except ImportError as exc:
            logger.error(
                "Please install `mem0` first. You can install it by "
                "running `pip install mem0ai`."
            )
            raise exc

        self.api_key = api_key or os.getenv("MEM0_API_KEY")
        if not self.api_key:
            raise ValueError(
                "API key must be provided either through constructor "
                "or MEM0_API_KEY environment variable."
            )

        self.client = MemoryClient(api_key=self.api_key)
        self.agent_id = agent_id
        self.user_id = user_id
        self.metadata = metadata or {}

    def _prepare_options(
        self,
        agent_id: Optional[str] = None,
        user_id: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> Dict[str, Any]:
        r"""Helper method to prepare options for Mem0 API calls.

        Args:
            agent_id (Optional[str], optional): Agent ID to use (default:
                :obj:`None`).
            user_id (Optional[str], optional): User ID to use (default:
                :obj:`None`).
            metadata (Optional[Dict[str, Any]], optional): Additional metadata
                to include (default: :obj:`None`).
            **kwargs (Any): Additional keyword arguments.

        Returns:
            Dict[str, Any]: Prepared options dictionary for API calls, with
                all :obj:`None`-valued entries dropped.
        """
        options = {
            "agent_id": agent_id or self.agent_id,
            "user_id": user_id or self.user_id,
            "metadata": {**self.metadata, **(metadata or {})},
            "output_format": "v1.1",
            **kwargs,
        }
        # Mem0 rejects explicit None values, so strip them out.
        return {k: v for k, v in options.items() if v is not None}

    def _prepare_filters(
        self,
        agent_id: Optional[str] = None,
        user_id: Optional[str] = None,
        filters: Optional[Dict[str, Any]] = None,
    ) -> Dict[str, Any]:
        r"""Helper method to prepare filters for Mem0 API calls.

        Args:
            agent_id (Optional[str], optional): Agent ID to filter by
                (default: :obj:`None`).
            user_id (Optional[str], optional): User ID to filter by (default:
                :obj:`None`).
            filters (Optional[Dict[str, Any]], optional): Additional filters
                (default: :obj:`None`).

        Returns:
            Dict[str, Any]: Prepared filters dictionary for API calls. An
                empty dict when no filter condition applies.
        """
        base_filters: Dict[str, Any] = {"AND": []}
        if filters:
            base_filters["AND"].append(filters)
        if agent_id or self.agent_id:
            base_filters["AND"].append({"agent_id": agent_id or self.agent_id})
        if user_id or self.user_id:
            base_filters["AND"].append({"user_id": user_id or self.user_id})
        return base_filters if base_filters["AND"] else {}

    def _prepare_messages(
        self,
        records: List[Dict[str, Any]],
    ) -> List[Dict[str, Any]]:
        r"""Prepare messages from records for Mem0 API calls.

        Args:
            records (List[Dict[str, Any]]): List of record dictionaries.
                Each record is expected to carry a ``"message"`` dict with a
                ``"content"`` key and a ``"role_at_backend"`` enum value.

        Returns:
            List[Dict[str, Any]]: List of prepared message dictionaries.
        """
        messages = []
        for record in records:
            content = record["message"]["content"]
            role = record["role_at_backend"].value
            messages.append({"role": role, "content": content})
        return messages

    def save(self, records: List[Dict[str, Any]]) -> None:
        r"""Saves a batch of records to the Mem0 storage system.

        Errors are logged and swallowed so a storage hiccup does not crash
        the caller.

        Args:
            records (List[Dict[str, Any]]): A list of dictionaries, where each
                dictionary represents a unique record to be stored.
        """
        try:
            messages = self._prepare_messages(records)

            options = self._prepare_options(
                agent_id=self.agent_id,
                user_id=self.user_id,
                metadata=self.metadata,
            )
            self.client.add(messages, **options)
        except Exception as e:
            logger.error(f"Error adding memory: {e}")

    def load(self) -> List[Dict[str, Any]]:
        r"""Loads all stored records from the Mem0 storage system.

        Returns:
            List[Dict[str, Any]]: A list of dictionaries, where each dictionary
                represents a stored record. An empty list on error.
        """
        try:
            filters = self._prepare_filters(
                agent_id=self.agent_id,
                user_id=self.user_id,
            )
            # NOTE(review): the filter dict is expanded into keyword
            # arguments (e.g. AND=[...]) — confirm this matches the mem0
            # v2 `get_all` signature rather than `filters=filters`.
            results = self.client.get_all(version="v2", **filters)

            # Transform results into MemoryRecord objects
            transformed_results = []
            for result in results:
                memory_record = MemoryRecord(
                    uuid=UUID(result["id"]),
                    message=BaseMessage(
                        role_name="user",
                        role_type=RoleType.USER,
                        meta_dict={},
                        content=result["memory"],
                    ),
                    role_at_backend=OpenAIBackendRole.USER,
                    extra_info=result.get("metadata", {}),
                    timestamp=datetime.fromisoformat(
                        result["created_at"]
                    ).timestamp(),
                    agent_id=result.get("agent_id", ""),
                )
                transformed_results.append(memory_record.to_dict())

            return transformed_results
        except Exception as e:
            logger.error(f"Error searching memories: {e}")
            return []

    def clear(
        self,
    ) -> None:
        r"""Removes all records from the Mem0 storage system.

        Errors are logged and swallowed.
        """
        try:
            filters = self._prepare_filters(
                agent_id=self.agent_id,
                user_id=self.user_id,
            )
            # NOTE(review): as in `load`, the filter dict is expanded into
            # keyword arguments — verify against the mem0 `delete_users`
            # signature.
            self.client.delete_users(**filters)
        except Exception as e:
            logger.error(f"Error deleting memories: {e}")
|
|
@@ -450,9 +450,9 @@ class QdrantStorage(BaseVectorStorage):
|
|
|
450
450
|
search_filter = Filter(must=cast(List[Condition], must_conditions))
|
|
451
451
|
|
|
452
452
|
# Execute the search with optional filter
|
|
453
|
-
search_result = self._client.
|
|
453
|
+
search_result = self._client.query_points(
|
|
454
454
|
collection_name=self.collection_name,
|
|
455
|
-
|
|
455
|
+
query=query.query_vector,
|
|
456
456
|
with_payload=True,
|
|
457
457
|
with_vectors=True,
|
|
458
458
|
limit=query.top_k,
|
|
@@ -467,7 +467,7 @@ class QdrantStorage(BaseVectorStorage):
|
|
|
467
467
|
payload=point.payload,
|
|
468
468
|
vector=point.vector, # type: ignore[arg-type]
|
|
469
469
|
)
|
|
470
|
-
for point in search_result
|
|
470
|
+
for point in search_result.points
|
|
471
471
|
]
|
|
472
472
|
|
|
473
473
|
return query_results
|
camel/toolkits/__init__.py
CHANGED
|
@@ -60,6 +60,7 @@ from .browser_toolkit import BrowserToolkit
|
|
|
60
60
|
from .file_write_toolkit import FileWriteToolkit
|
|
61
61
|
from .terminal_toolkit import TerminalToolkit
|
|
62
62
|
from .pubmed_toolkit import PubMedToolkit
|
|
63
|
+
from .thinking_toolkit import ThinkingToolkit
|
|
63
64
|
|
|
64
65
|
__all__ = [
|
|
65
66
|
'BaseToolkit',
|
|
@@ -108,4 +109,5 @@ __all__ = [
|
|
|
108
109
|
'FileWriteToolkit',
|
|
109
110
|
'TerminalToolkit',
|
|
110
111
|
'PubMedToolkit',
|
|
112
|
+
'ThinkingToolkit',
|
|
111
113
|
]
|
|
@@ -438,6 +438,7 @@ class BaseBrowser:
|
|
|
438
438
|
sync_playwright,
|
|
439
439
|
)
|
|
440
440
|
|
|
441
|
+
self._ensure_browser_installed()
|
|
441
442
|
self.history: list = []
|
|
442
443
|
self.headless = headless
|
|
443
444
|
self.playwright = sync_playwright().start()
|
|
@@ -914,6 +915,48 @@ class BaseBrowser:
|
|
|
914
915
|
markdown_content = html2text(html_content)
|
|
915
916
|
return markdown_content
|
|
916
917
|
|
|
918
|
+
def _ensure_browser_installed(self) -> None:
    r"""Ensure a Playwright Chromium browser is installed.

    Probes by launching (and immediately closing) Chromium; on any failure
    it installs Chromium via ``python -m playwright install`` and, on
    Linux, its system dependencies as well.

    Raises:
        RuntimeError: If the browser installation subprocess fails.
    """
    import platform
    import subprocess
    import sys

    try:
        from playwright.sync_api import sync_playwright

        # Cheapest reliable probe: a launch succeeds only when the
        # browser binary is actually present and runnable.
        with sync_playwright() as p:
            browser = p.chromium.launch()
            browser.close()
    except Exception:
        logger.info("Installing Chromium browser...")
        try:
            subprocess.run(
                [
                    sys.executable,
                    "-m",
                    "playwright",
                    "install",
                    "chromium",
                ],
                check=True,
                capture_output=True,
            )
            if platform.system().lower() == "linux":
                # Linux additionally needs OS-level shared libraries.
                subprocess.run(
                    [
                        sys.executable,
                        "-m",
                        "playwright",
                        "install-deps",
                        "chromium",
                    ],
                    check=True,
                    capture_output=True,
                )
            logger.info("Chromium browser installation completed")
        except subprocess.CalledProcessError as e:
            # capture_output=True yields stderr as bytes; decode it so the
            # error message is readable instead of a b'...' repr.
            stderr = (
                e.stderr.decode("utf-8", errors="replace")
                if isinstance(e.stderr, bytes)
                else e.stderr
            )
            raise RuntimeError(
                f"Failed to install browser: {stderr}"
            ) from e
|
|
959
|
+
|
|
917
960
|
|
|
918
961
|
class BrowserToolkit(BaseToolkit):
|
|
919
962
|
r"""A class for browsing the web and interacting with web pages.
|
camel/toolkits/code_execution.py
CHANGED
|
@@ -29,6 +29,7 @@ class CodeExecutionToolkit(BaseToolkit):
|
|
|
29
29
|
|
|
30
30
|
Args:
|
|
31
31
|
sandbox (str): The environment type used to execute code.
|
|
32
|
+
(default: `subprocess`)
|
|
32
33
|
verbose (bool): Whether to print the output of the code execution.
|
|
33
34
|
(default: :obj:`False`)
|
|
34
35
|
unsafe_mode (bool): If `True`, the interpreter runs the code
|
|
@@ -43,7 +44,7 @@ class CodeExecutionToolkit(BaseToolkit):
|
|
|
43
44
|
self,
|
|
44
45
|
sandbox: Literal[
|
|
45
46
|
"internal_python", "jupyter", "docker", "subprocess", "e2b"
|
|
46
|
-
] = "
|
|
47
|
+
] = "subprocess",
|
|
47
48
|
verbose: bool = False,
|
|
48
49
|
unsafe_mode: bool = False,
|
|
49
50
|
import_white_list: Optional[List[str]] = None,
|
camel/toolkits/mcp_toolkit.py
CHANGED
|
@@ -56,6 +56,8 @@ class _MCPServer(BaseToolkit):
|
|
|
56
56
|
env (Dict[str, str]): Environment variables for the stdio mode command.
|
|
57
57
|
(default: :obj:`'None'`)
|
|
58
58
|
timeout (Optional[float]): Connection timeout. (default: :obj:`'None'`)
|
|
59
|
+
headers (Dict[str, str]): Headers for the HTTP request.
|
|
60
|
+
(default: :obj:`'None'`)
|
|
59
61
|
"""
|
|
60
62
|
|
|
61
63
|
def __init__(
|
|
@@ -64,6 +66,7 @@ class _MCPServer(BaseToolkit):
|
|
|
64
66
|
args: Optional[List[str]] = None,
|
|
65
67
|
env: Optional[Dict[str, str]] = None,
|
|
66
68
|
timeout: Optional[float] = None,
|
|
69
|
+
headers: Optional[Dict[str, str]] = None,
|
|
67
70
|
):
|
|
68
71
|
from mcp import Tool
|
|
69
72
|
from mcp.client.session import ClientSession
|
|
@@ -73,6 +76,7 @@ class _MCPServer(BaseToolkit):
|
|
|
73
76
|
self.command_or_url = command_or_url
|
|
74
77
|
self.args = args or []
|
|
75
78
|
self.env = env or {}
|
|
79
|
+
self.headers = headers or {}
|
|
76
80
|
|
|
77
81
|
self._mcp_tools: List[Tool] = []
|
|
78
82
|
self._session: Optional['ClientSession'] = None
|
|
@@ -99,7 +103,10 @@ class _MCPServer(BaseToolkit):
|
|
|
99
103
|
read_stream,
|
|
100
104
|
write_stream,
|
|
101
105
|
) = await self._exit_stack.enter_async_context(
|
|
102
|
-
sse_client(
|
|
106
|
+
sse_client(
|
|
107
|
+
self.command_or_url,
|
|
108
|
+
headers=self.headers,
|
|
109
|
+
)
|
|
103
110
|
)
|
|
104
111
|
else:
|
|
105
112
|
command = self.command_or_url
|
|
@@ -343,6 +350,27 @@ class MCPToolkit(BaseToolkit):
|
|
|
343
350
|
Either `servers` or `config_path` must be provided. If both are
|
|
344
351
|
provided, servers from both sources will be combined.
|
|
345
352
|
|
|
353
|
+
For web servers in the config file, you can specify authorization
|
|
354
|
+
headers using the "headers" field to connect to protected MCP server
|
|
355
|
+
endpoints.
|
|
356
|
+
|
|
357
|
+
Example configuration:
|
|
358
|
+
|
|
359
|
+
.. code-block:: json
|
|
360
|
+
|
|
361
|
+
{
|
|
362
|
+
"mcpWebServers": {
|
|
363
|
+
"protected-server": {
|
|
364
|
+
"url": "https://example.com/mcp",
|
|
365
|
+
"timeout": 30,
|
|
366
|
+
"headers": {
|
|
367
|
+
"Authorization": "Bearer YOUR_TOKEN",
|
|
368
|
+
"X-API-Key": "YOUR_API_KEY"
|
|
369
|
+
}
|
|
370
|
+
}
|
|
371
|
+
}
|
|
372
|
+
}
|
|
373
|
+
|
|
346
374
|
Attributes:
|
|
347
375
|
servers (List[_MCPServer]): List of _MCPServer instances being managed.
|
|
348
376
|
"""
|
|
@@ -442,6 +470,7 @@ class MCPToolkit(BaseToolkit):
|
|
|
442
470
|
server = _MCPServer(
|
|
443
471
|
command_or_url=cfg["url"],
|
|
444
472
|
timeout=cfg.get("timeout", None),
|
|
473
|
+
headers=cfg.get("headers", {}),
|
|
445
474
|
)
|
|
446
475
|
all_servers.append(server)
|
|
447
476
|
|