camel-ai 0.2.34__py3-none-any.whl → 0.2.36__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai has been flagged by the registry (details available on the registry page).

Files changed (47)
  1. camel/__init__.py +1 -1
  2. camel/agents/_types.py +1 -1
  3. camel/agents/_utils.py +4 -4
  4. camel/agents/chat_agent.py +174 -29
  5. camel/configs/__init__.py +3 -0
  6. camel/configs/openai_config.py +20 -16
  7. camel/configs/openrouter_config.py +106 -0
  8. camel/datasets/base_generator.py +188 -27
  9. camel/datasets/few_shot_generator.py +2 -5
  10. camel/environments/single_step.py +1 -7
  11. camel/memories/agent_memories.py +49 -2
  12. camel/memories/base.py +23 -1
  13. camel/memories/blocks/chat_history_block.py +2 -1
  14. camel/memories/records.py +5 -0
  15. camel/models/__init__.py +2 -0
  16. camel/models/gemini_model.py +36 -0
  17. camel/models/groq_model.py +6 -3
  18. camel/models/model_factory.py +3 -0
  19. camel/models/openrouter_model.py +204 -0
  20. camel/models/stub_model.py +25 -0
  21. camel/retrievers/vector_retriever.py +12 -7
  22. camel/storages/__init__.py +2 -0
  23. camel/storages/key_value_storages/__init__.py +4 -1
  24. camel/storages/key_value_storages/json.py +3 -7
  25. camel/storages/key_value_storages/mem0_cloud.py +224 -0
  26. camel/storages/vectordb_storages/base.py +5 -1
  27. camel/storages/vectordb_storages/qdrant.py +3 -3
  28. camel/toolkits/__init__.py +2 -1
  29. camel/toolkits/browser_toolkit.py +43 -0
  30. camel/toolkits/code_execution.py +2 -1
  31. camel/toolkits/mcp_toolkit.py +30 -1
  32. camel/toolkits/memory_toolkit.py +129 -0
  33. camel/types/enums.py +24 -0
  34. camel/types/unified_model_type.py +5 -0
  35. camel/utils/chunker/__init__.py +22 -0
  36. camel/utils/chunker/base.py +24 -0
  37. camel/utils/chunker/code_chunker.py +193 -0
  38. camel/utils/chunker/uio_chunker.py +66 -0
  39. camel/utils/token_counting.py +133 -0
  40. camel/verifiers/__init__.py +1 -2
  41. camel/verifiers/base.py +133 -96
  42. camel/verifiers/models.py +0 -12
  43. camel/verifiers/python_verifier.py +25 -14
  44. {camel_ai-0.2.34.dist-info → camel_ai-0.2.36.dist-info}/METADATA +3 -1
  45. {camel_ai-0.2.34.dist-info → camel_ai-0.2.36.dist-info}/RECORD +47 -39
  46. {camel_ai-0.2.34.dist-info → camel_ai-0.2.36.dist-info}/WHEEL +0 -0
  47. {camel_ai-0.2.34.dist-info → camel_ai-0.2.36.dist-info}/licenses/LICENSE +0 -0
camel/models/openrouter_model.py (new file)
@@ -0,0 +1,204 @@
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ import os
+ from typing import Any, Dict, List, Optional, Type, Union
+
+ from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+ from pydantic import BaseModel
+
+ from camel.configs import OPENROUTER_API_PARAMS, OpenRouterConfig
+ from camel.messages import OpenAIMessage
+ from camel.models import BaseModelBackend
+ from camel.models._utils import try_modify_message_with_format
+ from camel.types import (
+     ChatCompletion,
+     ChatCompletionChunk,
+     ModelType,
+ )
+ from camel.utils import (
+     BaseTokenCounter,
+     OpenAITokenCounter,
+     api_keys_required,
+ )
+
+
+ class OpenRouterModel(BaseModelBackend):
+     r"""LLM API served by OpenRouter in a unified BaseModelBackend interface.
+
+     Args:
+         model_type (Union[ModelType, str]): Model for which a backend is
+             created.
+         model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+             that will be fed into :obj:`openai.ChatCompletion.create()`.
+             If :obj:`None`, :obj:`OpenRouterConfig().as_dict()` will be used.
+             (default: :obj:`None`)
+         api_key (Optional[str], optional): The API key for authenticating
+             with the OpenRouter service. (default: :obj:`None`)
+         url (Optional[str], optional): The url to the OpenRouter service.
+             (default: :obj:`None`)
+         token_counter (Optional[BaseTokenCounter], optional): Token counter to
+             use for the model. If not provided, :obj:`OpenAITokenCounter(
+             ModelType.GPT_4O_MINI)` will be used.
+             (default: :obj:`None`)
+     """
+
+     @api_keys_required([("api_key", "OPENROUTER_API_KEY")])
+     def __init__(
+         self,
+         model_type: Union[ModelType, str],
+         model_config_dict: Optional[Dict[str, Any]] = None,
+         api_key: Optional[str] = None,
+         url: Optional[str] = None,
+         token_counter: Optional[BaseTokenCounter] = None,
+     ) -> None:
+         if model_config_dict is None:
+             model_config_dict = OpenRouterConfig().as_dict()
+         api_key = api_key or os.environ.get("OPENROUTER_API_KEY")
+         url = url or os.environ.get(
+             "OPENROUTER_API_BASE_URL", "https://openrouter.ai/api/v1"
+         )
+         super().__init__(
+             model_type, model_config_dict, api_key, url, token_counter
+         )
+         self._client = OpenAI(
+             timeout=180,
+             max_retries=3,
+             api_key=self._api_key,
+             base_url=self._url,
+         )
+         self._async_client = AsyncOpenAI(
+             timeout=180,
+             max_retries=3,
+             api_key=self._api_key,
+             base_url=self._url,
+         )
+
+     @property
+     def token_counter(self) -> BaseTokenCounter:
+         r"""Initialize the token counter for the model backend.
+
+         Returns:
+             BaseTokenCounter: The token counter following the model's
+                 tokenization style.
+         """
+         if not self._token_counter:
+             self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
+         return self._token_counter
+
+     def _prepare_request(
+         self,
+         messages: List[OpenAIMessage],
+         response_format: Optional[Type[BaseModel]] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
+     ) -> Dict[str, Any]:
+         request_config = self.model_config_dict.copy()
+         if tools:
+             request_config["tools"] = tools
+         elif response_format:
+             try_modify_message_with_format(messages[-1], response_format)
+             request_config["response_format"] = {"type": "json_object"}
+
+         return request_config
+
+     def _run(
+         self,
+         messages: List[OpenAIMessage],
+         response_format: Optional[type[BaseModel]] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
+     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+         r"""Runs inference of OpenRouter chat completion.
+
+         Args:
+             messages (List[OpenAIMessage]): Message list with the chat history
+                 in OpenAI API format.
+             response_format (Optional[Type[BaseModel]]): The format of the
+                 response.
+             tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                 use for the request.
+
+         Returns:
+             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                 `ChatCompletion` in the non-stream mode, or
+                 `Stream[ChatCompletionChunk]` in the stream mode.
+         """
+         request_config = self._prepare_request(
+             messages, response_format, tools
+         )
+
+         response = self._client.chat.completions.create(
+             messages=messages,
+             model=self.model_type,
+             **request_config,
+         )
+
+         return response
+
+     async def _arun(
+         self,
+         messages: List[OpenAIMessage],
+         response_format: Optional[type[BaseModel]] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
+     ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+         r"""Runs inference of OpenRouter chat completion asynchronously.
+
+         Args:
+             messages (List[OpenAIMessage]): Message list with the chat history
+                 in OpenAI API format.
+             response_format (Optional[Type[BaseModel]]): The format of the
+                 response.
+             tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                 use for the request.
+
+         Returns:
+             Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                 `ChatCompletion` in the non-stream mode, or
+                 `AsyncStream[ChatCompletionChunk]` in the stream mode.
+         """
+         request_config = self._prepare_request(
+             messages, response_format, tools
+         )
+
+         response = await self._async_client.chat.completions.create(
+             messages=messages,
+             model=self.model_type,
+             **request_config,
+         )
+
+         return response
+
+     def check_model_config(self):
+         r"""Check whether the model configuration contains any unexpected
+         arguments to OpenRouter API.
+
+         Raises:
+             ValueError: If the model configuration dictionary contains any
+                 unexpected arguments to OpenRouter API.
+         """
+         for param in self.model_config_dict:
+             if param not in OPENROUTER_API_PARAMS:
+                 raise ValueError(
+                     f"Unexpected argument `{param}` is "
+                     "input into OpenRouter model backend."
+                 )
+
+     @property
+     def stream(self) -> bool:
+         r"""Returns whether the model is in stream mode, which sends partial
+         results each time.
+
+         Returns:
+             bool: Whether the model is in stream mode.
+         """
+         return self.model_config_dict.get("stream", False)
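For orientation, a hedged usage sketch of this new backend (not part of the diff). The model slug is illustrative, and the public `run()` entry point is assumed to come from `BaseModelBackend`, wrapping the `_run()` shown above:

```python
# Hypothetical usage sketch for the new OpenRouterModel backend.
# Assumes OPENROUTER_API_KEY is set in the environment and that
# BaseModelBackend exposes a public run() wrapper, as in other
# CAMEL backends.
from camel.models.openrouter_model import OpenRouterModel

model = OpenRouterModel(
    model_type="anthropic/claude-3.5-sonnet",  # illustrative OpenRouter slug
)
response = model.run(
    messages=[{"role": "user", "content": "Say hello in one word."}]
)
print(response.choices[0].message.content)
```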
camel/models/stub_model.py
@@ -44,6 +44,31 @@ class StubTokenCounter(BaseTokenCounter):
          """
          return 10

+     def encode(self, text: str) -> List[int]:
+         r"""Encode text into token IDs for STUB models.
+
+         Args:
+             text (str): The text to encode.
+
+         Returns:
+             List[int]: List of token IDs.
+         """
+         # For stub models, just return a list of 0s with length proportional
+         # to text length
+         return [0] * (len(text) // 4 + 1)  # Simple approximation
+
+     def decode(self, token_ids: List[int]) -> str:
+         r"""Decode token IDs back to text for STUB models.
+
+         Args:
+             token_ids (List[int]): List of token IDs to decode.
+
+         Returns:
+             str: Decoded text.
+         """
+         # For stub models, return a placeholder string
+         return "[Stub decoded text]"
+

  class StubModel(BaseModelBackend):
      r"""A dummy model used for unit tests."""
camel/retrievers/vector_retriever.py
@@ -27,6 +27,7 @@ from camel.storages import (
      VectorRecord,
  )
  from camel.utils import Constants
+ from camel.utils.chunker import BaseChunker, UnstructuredIOChunker

  if TYPE_CHECKING:
      from unstructured.documents.elements import Element
@@ -78,6 +79,7 @@ class VectorRetriever(BaseRetriever):
          should_chunk: bool = True,
          extra_info: Optional[dict] = None,
          metadata_filename: Optional[str] = None,
+         chunker: Optional[BaseChunker] = None,
          **kwargs: Any,
      ) -> None:
          r"""Processes content from local file path, remote URL, string
@@ -101,6 +103,12 @@ class VectorRetriever(BaseRetriever):
                  used for storing metadata. Defaults to None.
              **kwargs (Any): Additional keyword arguments for content parsing.
          """
+         if chunker is None:
+             chunker = UnstructuredIOChunker(
+                 chunk_type=chunk_type,
+                 max_characters=max_characters,
+                 metadata_filename=metadata_filename,
+             )
          from unstructured.documents.elements import Element

          if isinstance(content, Element):
@@ -140,13 +148,7 @@ class VectorRetriever(BaseRetriever):
          else:
              # Chunk the content if required
              chunks = (
-                 self.uio.chunk_elements(
-                     chunk_type=chunk_type,
-                     elements=elements,
-                     max_characters=max_characters,
-                 )
-                 if should_chunk
-                 else elements
+                 chunker.chunk(content=elements) if should_chunk else (elements)
              )

              # Process chunks in batches and store embeddings
@@ -157,6 +159,7 @@ class VectorRetriever(BaseRetriever):
              )

              records = []
+             offset = 0
              # Prepare the payload for each vector record, includes the
              # content path, chunk metadata, and chunk text
              for vector, chunk in zip(batch_vectors, batch_chunks):
@@ -178,6 +181,7 @@ class VectorRetriever(BaseRetriever):
                      chunk_metadata["metadata"].pop("orig_elements", "")
                  chunk_metadata["extra_info"] = extra_info or {}
                  chunk_text = {"text": str(chunk)}
+                 chunk_metadata["metadata"]["piece_num"] = i + offset + 1
                  combined_dict = {
                      **content_path_info,
                      **chunk_metadata,
@@ -187,6 +191,7 @@ class VectorRetriever(BaseRetriever):
                  records.append(
                      VectorRecord(vector=vector, payload=combined_dict)
                  )
+                 offset += 1

          self.storage.add(records=records)

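These hunks route all chunking through the new `BaseChunker` abstraction: `process()` builds an `UnstructuredIOChunker` from `chunk_type`/`max_characters` when no chunker is passed, and each stored chunk now carries a running `piece_num` in its metadata. A hedged sketch of passing a chunker explicitly; the default embedding model and storage, and the example URL, are assumptions:

```python
from camel.retrievers import VectorRetriever
from camel.utils.chunker import UnstructuredIOChunker

retriever = VectorRetriever()  # assumed default embedding model and storage

# Equivalent to what process() now builds internally when chunker=None.
chunker = UnstructuredIOChunker(
    chunk_type="chunk_by_title",
    max_characters=500,
)
retriever.process("https://example.com/article.html", chunker=chunker)
```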
camel/storages/__init__.py
@@ -18,6 +18,7 @@ from .graph_storages.neo4j_graph import Neo4jGraph
  from .key_value_storages.base import BaseKeyValueStorage
  from .key_value_storages.in_memory import InMemoryKeyValueStorage
  from .key_value_storages.json import JsonStorage
+ from .key_value_storages.mem0_cloud import Mem0Storage
  from .key_value_storages.redis import RedisStorage
  from .vectordb_storages.base import (
      BaseVectorStorage,
@@ -42,4 +43,5 @@ __all__ = [
      'BaseGraphStorage',
      'Neo4jGraph',
      'NebulaGraph',
+     'Mem0Storage',
  ]
camel/storages/key_value_storages/__init__.py
@@ -14,7 +14,8 @@

  from .base import BaseKeyValueStorage
  from .in_memory import InMemoryKeyValueStorage
- from .json import JsonStorage
+ from .json import CamelJSONEncoder, JsonStorage
+ from .mem0_cloud import Mem0Storage
  from .redis import RedisStorage

  __all__ = [
@@ -22,4 +23,6 @@ __all__ = [
      'InMemoryKeyValueStorage',
      'JsonStorage',
      'RedisStorage',
+     'CamelJSONEncoder',
+     'Mem0Storage',
  ]
camel/storages/key_value_storages/json.py
@@ -26,7 +26,7 @@ from camel.types import (
  )


- class _CamelJSONEncoder(json.JSONEncoder):
+ class CamelJSONEncoder(json.JSONEncoder):
      r"""A custom JSON encoder for serializing specifically enumerated types.
      Ensures enumerated types can be stored in and retrieved from JSON format.
      """
@@ -62,7 +62,7 @@ class JsonStorage(BaseKeyValueStorage):
      def _json_object_hook(self, d) -> Any:
          if "__enum__" in d:
              name, member = d["__enum__"].split(".")
-             return getattr(_CamelJSONEncoder.CAMEL_ENUMS[name], member)
+             return getattr(CamelJSONEncoder.CAMEL_ENUMS[name], member)
          else:
              return d

@@ -75,11 +75,7 @@ class JsonStorage(BaseKeyValueStorage):
          """
          with self.json_path.open("a") as f:
              f.writelines(
-                 [
-                     json.dumps(r, cls=_CamelJSONEncoder, ensure_ascii=False)
-                     + "\n"
-                     for r in records
-                 ]
+                 [json.dumps(r, cls=CamelJSONEncoder) + "\n" for r in records]
              )

      def load(self) -> List[Dict[str, Any]]:
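With the encoder renamed from `_CamelJSONEncoder` to `CamelJSONEncoder` and exported from the package `__init__`, it can be reused outside `JsonStorage` (note the rewritten `save()` also drops `ensure_ascii=False`). A minimal round-trip sketch, assuming `RoleType` is one of the enums registered in `CAMEL_ENUMS`:

```python
import json

from camel.storages.key_value_storages import CamelJSONEncoder
from camel.types import RoleType

# Enums are assumed to serialize through the encoder's __enum__ marker,
# which JsonStorage._json_object_hook uses to restore them on load().
encoded = json.dumps({"role_type": RoleType.USER}, cls=CamelJSONEncoder)
print(encoded)
```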
camel/storages/key_value_storages/mem0_cloud.py (new file)
@@ -0,0 +1,224 @@
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+ import logging
+ import os
+ from datetime import datetime
+ from typing import Any, Dict, List, Optional
+ from uuid import UUID
+
+ from camel.memories.records import MemoryRecord
+ from camel.messages import BaseMessage
+ from camel.storages.key_value_storages import BaseKeyValueStorage
+ from camel.types import OpenAIBackendRole, RoleType
+
+ logger = logging.getLogger(__name__)
+
+
+ class Mem0Storage(BaseKeyValueStorage):
+     r"""A concrete implementation of the :obj:`BaseKeyValueStorage` using
+     Mem0 as the backend. This storage system uses Mem0's text capabilities
+     to store, search, and manage text with context.
+
+     Args:
+         agent_id (str): Default agent ID to associate memories with.
+         api_key (str, optional): The API key for authentication. If not
+             provided, will try to get from environment variable MEM0_API_KEY
+             (default: :obj:`None`).
+         user_id (str, optional): Default user ID to associate memories with
+             (default: :obj:`None`).
+         metadata (Dict[str, Any], optional): Default metadata to include with
+             all memories (default: :obj:`None`).
+
+     References:
+         https://docs.mem0.ai
+     """
+
+     def __init__(
+         self,
+         agent_id: str,
+         api_key: Optional[str] = None,
+         user_id: Optional[str] = None,
+         metadata: Optional[Dict[str, Any]] = None,
+     ) -> None:
+         try:
+             from mem0 import MemoryClient
+         except ImportError as exc:
+             logger.error(
+                 "Please install `mem0` first. You can install it by "
+                 "running `pip install mem0ai`."
+             )
+             raise exc
+
+         self.api_key = api_key or os.getenv("MEM0_API_KEY")
+         if not self.api_key:
+             raise ValueError(
+                 "API key must be provided either through constructor "
+                 "or MEM0_API_KEY environment variable."
+             )
+
+         self.client = MemoryClient(api_key=self.api_key)
+         self.agent_id = agent_id
+         self.user_id = user_id
+         self.metadata = metadata or {}
+
+     def _prepare_options(
+         self,
+         agent_id: Optional[str] = None,
+         user_id: Optional[str] = None,
+         metadata: Optional[Dict[str, Any]] = None,
+         **kwargs: Any,
+     ) -> Dict[str, Any]:
+         r"""Helper method to prepare options for Mem0 API calls.
+
+         Args:
+             agent_id (Optional[str], optional): Agent ID to use (default:
+                 :obj:`None`).
+             user_id (Optional[str], optional): User ID to use (default:
+                 :obj:`None`).
+             metadata (Optional[Dict[str, Any]], optional): Additional metadata
+                 to include (default: :obj:`None`).
+             **kwargs (Any): Additional keyword arguments.
+
+         Returns:
+             Dict[str, Any]: Prepared options dictionary for API calls.
+         """
+         options = {
+             "agent_id": agent_id or self.agent_id,
+             "user_id": user_id or self.user_id,
+             "metadata": {**self.metadata, **(metadata or {})},
+             "output_format": "v1.1",
+             **kwargs,
+         }
+         return {k: v for k, v in options.items() if v is not None}
+
+     def _prepare_filters(
+         self,
+         agent_id: Optional[str] = None,
+         user_id: Optional[str] = None,
+         filters: Optional[Dict[str, Any]] = None,
+     ) -> Dict[str, Any]:
+         r"""Helper method to prepare filters for Mem0 API calls.
+
+         Args:
+             agent_id (Optional[str], optional): Agent ID to filter by
+                 (default: :obj:`None`).
+             user_id (Optional[str], optional): User ID to filter by (default:
+                 :obj:`None`).
+             filters (Optional[Dict[str, Any]], optional): Additional filters
+                 (default: :obj:`None`).
+
+         Returns:
+             Dict[str, Any]: Prepared filters dictionary for API calls.
+         """
+         base_filters: Dict[str, Any] = {"AND": []}
+         if filters:
+             base_filters["AND"].append(filters)
+         if agent_id or self.agent_id:
+             base_filters["AND"].append({"agent_id": agent_id or self.agent_id})
+         if user_id or self.user_id:
+             base_filters["AND"].append({"user_id": user_id or self.user_id})
+         return base_filters if base_filters["AND"] else {}
+
+     def _prepare_messages(
+         self,
+         records: List[Dict[str, Any]],
+     ) -> List[Dict[str, Any]]:
+         r"""Prepare messages from records for Mem0 API calls.
+
+         Args:
+             records (List[Dict[str, Any]]): List of record dictionaries.
+
+         Returns:
+             List[Dict[str, Any]]: List of prepared message dictionaries.
+         """
+         messages = []
+         for record in records:
+             content = record["message"]["content"]
+             role = record["role_at_backend"].value
+             messages.append({"role": role, "content": content})
+         return messages
+
+     def save(self, records: List[Dict[str, Any]]) -> None:
+         r"""Saves a batch of records to the Mem0 storage system.
+
+         Args:
+             records (List[Dict[str, Any]]): A list of dictionaries, where each
+                 dictionary represents a unique record to be stored.
+         """
+         try:
+             messages = self._prepare_messages(records)
+
+             options = self._prepare_options(
+                 agent_id=self.agent_id,
+                 user_id=self.user_id,
+                 metadata=self.metadata,
+             )
+             self.client.add(messages, **options)
+         except Exception as e:
+             logger.error(f"Error adding memory: {e}")
+
+     def load(self) -> List[Dict[str, Any]]:
+         r"""Loads all stored records from the Mem0 storage system.
+
+         Returns:
+             List[Dict[str, Any]]: A list of dictionaries, where each
+                 dictionary represents a stored record.
+         """
+         try:
+             filters = self._prepare_filters(
+                 agent_id=self.agent_id,
+                 user_id=self.user_id,
+             )
+             results = self.client.get_all(version="v2", **filters)
+
+             # Transform results into MemoryRecord objects
+             transformed_results = []
+             for result in results:
+                 memory_record = MemoryRecord(
+                     uuid=UUID(result["id"]),
+                     message=BaseMessage(
+                         role_name="user",
+                         role_type=RoleType.USER,
+                         meta_dict={},
+                         content=result["memory"],
+                     ),
+                     role_at_backend=OpenAIBackendRole.USER,
+                     extra_info=result.get("metadata", {}),
+                     timestamp=datetime.fromisoformat(
+                         result["created_at"]
+                     ).timestamp(),
+                     agent_id=result.get("agent_id", ""),
+                 )
+                 transformed_results.append(memory_record.to_dict())
+
+             return transformed_results
+         except Exception as e:
+             logger.error(f"Error searching memories: {e}")
+             return []
+
+     def clear(
+         self,
+     ) -> None:
+         r"""Removes all records from the Mem0 storage system."""
+         try:
+             filters = self._prepare_filters(
+                 agent_id=self.agent_id,
+                 user_id=self.user_id,
+             )
+             self.client.delete_users(**filters)
+         except Exception as e:
+             logger.error(f"Error deleting memories: {e}")
camel/storages/vectordb_storages/base.py
@@ -87,7 +87,11 @@ class VectorDBQueryResult(BaseModel):
      ) -> "VectorDBQueryResult":
          r"""A class method to construct a `VectorDBQueryResult` instance."""
          return cls(
-             record=VectorRecord(vector=vector, id=id, payload=payload),
+             record=VectorRecord(
+                 vector=vector,
+                 id=id,
+                 payload=payload,
+             ),
              similarity=similarity,
          )

camel/storages/vectordb_storages/qdrant.py
@@ -450,9 +450,9 @@ class QdrantStorage(BaseVectorStorage):
          search_filter = Filter(must=cast(List[Condition], must_conditions))

          # Execute the search with optional filter
-         search_result = self._client.search(
+         search_result = self._client.query_points(
              collection_name=self.collection_name,
-             query_vector=query.query_vector,
+             query=query.query_vector,
              with_payload=True,
              with_vectors=True,
              limit=query.top_k,
@@ -467,7 +467,7 @@ class QdrantStorage(BaseVectorStorage):
                  payload=point.payload,
                  vector=point.vector,  # type: ignore[arg-type]
              )
-             for point in search_result
+             for point in search_result.points
          ]

          return query_results
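This migrates off qdrant-client's deprecated `search()` onto `query_points()`, which takes the vector via `query=` and returns a response object whose `.points` attribute holds the hits. A standalone sketch of the new call shape; the collection name and vector are illustrative, and the collection is assumed to already exist and be populated:

```python
from qdrant_client import QdrantClient

client = QdrantClient(":memory:")  # assumes a populated "demo" collection
resp = client.query_points(
    collection_name="demo",
    query=[0.1, 0.2, 0.3, 0.4],  # replaces the old query_vector= argument
    with_payload=True,
    limit=3,
)
for point in resp.points:  # .points replaces iterating the bare result
    print(point.id, point.score)
```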
camel/toolkits/__init__.py
@@ -50,6 +50,7 @@ from .semantic_scholar_toolkit import SemanticScholarToolkit
  from .zapier_toolkit import ZapierToolkit
  from .sympy_toolkit import SymPyToolkit
  from .mineru_toolkit import MinerUToolkit
+ from .memory_toolkit import MemoryToolkit
  from .audio_analysis_toolkit import AudioAnalysisToolkit
  from .excel_toolkit import ExcelToolkit
  from .video_analysis_toolkit import VideoAnalysisToolkit
@@ -60,7 +61,6 @@ from .file_write_toolkit import FileWriteToolkit
  from .terminal_toolkit import TerminalToolkit
  from .pubmed_toolkit import PubMedToolkit

-
  __all__ = [
      'BaseToolkit',
      'FunctionTool',
@@ -97,6 +97,7 @@ __all__ = [
      'ZapierToolkit',
      'SymPyToolkit',
      'MinerUToolkit',
+     'MemoryToolkit',
      'MCPToolkit',
      'MCPToolkitManager',
      'AudioAnalysisToolkit',