camel-ai 0.1.1__py3-none-any.whl → 0.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of camel-ai might be problematic.
- camel/__init__.py +1 -11
- camel/agents/__init__.py +5 -5
- camel/agents/chat_agent.py +124 -63
- camel/agents/critic_agent.py +28 -17
- camel/agents/deductive_reasoner_agent.py +235 -0
- camel/agents/embodied_agent.py +92 -40
- camel/agents/role_assignment_agent.py +27 -17
- camel/agents/task_agent.py +60 -34
- camel/agents/tool_agents/base.py +0 -1
- camel/agents/tool_agents/hugging_face_tool_agent.py +7 -4
- camel/configs.py +119 -7
- camel/embeddings/__init__.py +2 -0
- camel/embeddings/base.py +3 -2
- camel/embeddings/openai_embedding.py +3 -3
- camel/embeddings/sentence_transformers_embeddings.py +65 -0
- camel/functions/__init__.py +13 -3
- camel/functions/google_maps_function.py +335 -0
- camel/functions/math_functions.py +7 -7
- camel/functions/openai_function.py +344 -42
- camel/functions/search_functions.py +100 -35
- camel/functions/twitter_function.py +484 -0
- camel/functions/weather_functions.py +36 -23
- camel/generators.py +65 -46
- camel/human.py +17 -11
- camel/interpreters/__init__.py +25 -0
- camel/interpreters/base.py +49 -0
- camel/{utils/python_interpreter.py → interpreters/internal_python_interpreter.py} +129 -48
- camel/interpreters/interpreter_error.py +19 -0
- camel/interpreters/subprocess_interpreter.py +190 -0
- camel/loaders/__init__.py +22 -0
- camel/{functions/base_io_functions.py → loaders/base_io.py} +38 -35
- camel/{functions/unstructured_io_fuctions.py → loaders/unstructured_io.py} +199 -110
- camel/memories/__init__.py +17 -7
- camel/memories/agent_memories.py +156 -0
- camel/memories/base.py +97 -32
- camel/memories/blocks/__init__.py +21 -0
- camel/memories/{chat_history_memory.py → blocks/chat_history_block.py} +34 -34
- camel/memories/blocks/vectordb_block.py +101 -0
- camel/memories/context_creators/__init__.py +3 -2
- camel/memories/context_creators/score_based.py +32 -20
- camel/memories/records.py +6 -5
- camel/messages/__init__.py +2 -2
- camel/messages/base.py +99 -16
- camel/messages/func_message.py +7 -4
- camel/models/__init__.py +4 -2
- camel/models/anthropic_model.py +132 -0
- camel/models/base_model.py +3 -2
- camel/models/model_factory.py +10 -8
- camel/models/open_source_model.py +25 -13
- camel/models/openai_model.py +9 -10
- camel/models/stub_model.py +6 -5
- camel/prompts/__init__.py +7 -5
- camel/prompts/ai_society.py +21 -14
- camel/prompts/base.py +54 -47
- camel/prompts/code.py +22 -14
- camel/prompts/evaluation.py +8 -5
- camel/prompts/misalignment.py +26 -19
- camel/prompts/object_recognition.py +35 -0
- camel/prompts/prompt_templates.py +14 -8
- camel/prompts/role_description_prompt_template.py +16 -10
- camel/prompts/solution_extraction.py +9 -5
- camel/prompts/task_prompt_template.py +24 -21
- camel/prompts/translation.py +9 -5
- camel/responses/agent_responses.py +5 -2
- camel/retrievers/__init__.py +24 -0
- camel/retrievers/auto_retriever.py +319 -0
- camel/retrievers/base.py +64 -0
- camel/retrievers/bm25_retriever.py +149 -0
- camel/retrievers/vector_retriever.py +166 -0
- camel/societies/__init__.py +1 -1
- camel/societies/babyagi_playing.py +56 -32
- camel/societies/role_playing.py +188 -133
- camel/storages/__init__.py +18 -0
- camel/storages/graph_storages/__init__.py +23 -0
- camel/storages/graph_storages/base.py +82 -0
- camel/storages/graph_storages/graph_element.py +74 -0
- camel/storages/graph_storages/neo4j_graph.py +582 -0
- camel/storages/key_value_storages/base.py +1 -2
- camel/storages/key_value_storages/in_memory.py +1 -2
- camel/storages/key_value_storages/json.py +8 -13
- camel/storages/vectordb_storages/__init__.py +33 -0
- camel/storages/vectordb_storages/base.py +202 -0
- camel/storages/vectordb_storages/milvus.py +396 -0
- camel/storages/vectordb_storages/qdrant.py +371 -0
- camel/terminators/__init__.py +1 -1
- camel/terminators/base.py +2 -3
- camel/terminators/response_terminator.py +21 -12
- camel/terminators/token_limit_terminator.py +5 -3
- camel/types/__init__.py +12 -6
- camel/types/enums.py +86 -13
- camel/types/openai_types.py +10 -5
- camel/utils/__init__.py +18 -13
- camel/utils/commons.py +242 -81
- camel/utils/token_counting.py +135 -15
- {camel_ai-0.1.1.dist-info → camel_ai-0.1.3.dist-info}/METADATA +116 -74
- camel_ai-0.1.3.dist-info/RECORD +101 -0
- {camel_ai-0.1.1.dist-info → camel_ai-0.1.3.dist-info}/WHEEL +1 -1
- camel/memories/context_creators/base.py +0 -72
- camel_ai-0.1.1.dist-info/RECORD +0 -75
camel/memories/base.py
CHANGED
@@ -15,42 +15,19 @@
 from abc import ABC, abstractmethod
 from typing import List, Tuple
 
-from camel.memories import MemoryRecord
+from camel.memories.records import ContextRecord, MemoryRecord
 from camel.messages import OpenAIMessage
+from camel.utils import BaseTokenCounter
 
 
-class BaseMemory(ABC):
-    r"""An abstract
-    memory
-
-
-
-    operations. Every memory system should incorporate at least one instance of
-    a subclass derived from :obj:`BaseMemory`.
-
-    These instances, known as "memories", typically communicate using the
-    :obj:`MemoryRecord` object. Usually, a memory has at least one "storage"
-    mechanism, allowing it to interface with various storage systems, such as
-    disks or vector databases. Additionally, some memories might embed other
-    memory instances, enabling them to function as a high-level controller
-    within the broader memory system.
-
-    By default, when executing the :obj:`step()` method, an agent retrieves
-    messages from its designated memory and combines them with an incoming
-    message for input to the agent. Subsequently, both the response message and
-    the incoming messages are archived back into the memory.
+class MemoryBlock(ABC):
+    r"""An abstract class serves as the fundamental component within the agent
+    memory system. This class is equipped with "write" and "clear" functions.
+    However, it intentionally does not define a retrieval interface, as the
+    structure of the data to be retrieved may vary in different types of
+    memory blocks.
     """
 
-    @abstractmethod
-    def get_context(self) -> Tuple[List[OpenAIMessage], int]:
-        r"""Gets chat context with a proper size for the agent.
-
-        Returns:
-            (List[OpenAIMessage], int): A tuple containing the constructed
-            context in OpenAIMessage format and the total token count.
-        """
-        pass
-
     @abstractmethod
     def write_records(self, records: List[MemoryRecord]) -> None:
         r"""Writes records to the memory, appending them to existing ones.
@@ -70,6 +47,94 @@ class BaseMemory(ABC):
 
     @abstractmethod
     def clear(self) -> None:
-        r"""Clears all messages from the memory.
+        r"""Clears all messages from the memory."""
+        pass
+
+
+class BaseContextCreator(ABC):
+    r"""An abstract base class defining the interface for context creation
+    strategies.
+
+    This class provides a foundational structure for different strategies to
+    generate conversational context from a list of context records. The
+    primary goal is to create a context that is aligned with a specified token
+    count limit, allowing subclasses to define their specific approach.
+
+    Subclasses should implement the :obj:`token_counter`,:obj: `token_limit`,
+    and :obj:`create_context` methods to provide specific context creation
+    logic.
+
+    Attributes:
+        token_counter (BaseTokenCounter): A token counter instance responsible
+            for counting tokens in a message.
+        token_limit (int): The maximum number of tokens allowed in the
+            generated context.
+    """
+
+    @property
+    @abstractmethod
+    def token_counter(self) -> BaseTokenCounter:
+        pass
+
+    @property
+    @abstractmethod
+    def token_limit(self) -> int:
+        pass
+
+    @abstractmethod
+    def create_context(
+        self,
+        records: List[ContextRecord],
+    ) -> Tuple[List[OpenAIMessage], int]:
+        r"""An abstract method to create conversational context from the chat
+        history.
+
+        Constructs the context from provided records. The specifics of how this
+        is done and how the token count is managed should be provided by
+        subclasses implementing this method. The output messages order
+        should keep same as the input order.
+
+        Args:
+            records (List[ContextRecord]): A list of context records from
+                which to generate the context.
+
+        Returns:
+            Tuple[List[OpenAIMessage], int]: A tuple containing the constructed
+                context in OpenAIMessage format and the total token count.
+        """
+        pass
+
+
+class AgentMemory(MemoryBlock, ABC):
+    r"""Represents a specialized form of `MemoryBlock`, uniquely designed for
+    direct integration with an agent. Two key abstract functions, "retrieve"
+    and "get_context_creator", are used for generating model context based on
+    the memory records stored within the AgentMemory.
+    """
+
+    @abstractmethod
+    def retrieve(self) -> List[ContextRecord]:
+        r"""Get a record list from the memory for creating model context.
+
+        Returns:
+            List[ContextRecord]: A record list for creating model context.
         """
         pass
+
+    @abstractmethod
+    def get_context_creator(self) -> BaseContextCreator:
+        r"""Gets context creator.
+
+        Returns:
+            BaseContextCreator: A model context creator.
+        """
+        pass
+
+    def get_context(self) -> Tuple[List[OpenAIMessage], int]:
+        r"""Gets chat context with a proper size for the agent from the memory.
+
+        Returns:
+            (List[OpenAIMessage], int): A tuple containing the constructed
+                context in OpenAIMessage format and the total token count.
+        """
+        return self.get_context_creator().create_context(self.retrieve())
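To see how the new pieces are intended to compose: an AgentMemory couples a source of scored records with a BaseContextCreator, and the concrete get_context() simply feeds retrieve() into create_context(). The following is a minimal, hypothetical sketch that assumes only the interfaces shown in this hunk; it keeps records in a plain Python list and is not the implementation shipped in camel/memories/agent_memories.py.

from typing import List

from camel.memories.base import AgentMemory, BaseContextCreator
from camel.memories.records import ContextRecord, MemoryRecord


class ListAgentMemory(AgentMemory):
    """Hypothetical AgentMemory that stores records in a plain Python list."""

    def __init__(self, context_creator: BaseContextCreator) -> None:
        self._context_creator = context_creator
        self._records: List[MemoryRecord] = []

    def retrieve(self) -> List[ContextRecord]:
        # Score every record 1.0; a real memory would rank or window them.
        return [ContextRecord(record, 1.0) for record in self._records]

    def get_context_creator(self) -> BaseContextCreator:
        return self._context_creator

    def write_records(self, records: List[MemoryRecord]) -> None:
        self._records.extend(records)

    def clear(self) -> None:
        self._records = []

With an instance of this class, get_context() returns the (messages, token count) pair produced by whichever BaseContextCreator was injected.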
camel/memories/blocks/__init__.py
ADDED
@@ -0,0 +1,21 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+
+from .chat_history_block import ChatHistoryBlock
+from .vectordb_block import VectorDBBlock
+
+__all__ = [
+    'ChatHistoryBlock',
+    'VectorDBBlock',
+]
camel/memories/{chat_history_memory.py → blocks/chat_history_block.py}
RENAMED
@@ -11,68 +11,69 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
-
+import warnings
+from typing import List, Optional
 
-from camel.memories import
-from camel.memories.
-from camel.messages import OpenAIMessage
+from camel.memories.base import MemoryBlock
+from camel.memories.records import ContextRecord, MemoryRecord
 from camel.storages import BaseKeyValueStorage, InMemoryKeyValueStorage
 from camel.types import OpenAIBackendRole
 
 
-class ChatHistoryMemory(BaseMemory):
-    r"""An implementation of the :obj:
+class ChatHistoryBlock(MemoryBlock):
+    r"""An implementation of the :obj:`MemoryBlock` abstract base class for
     maintaining a record of chat histories.
 
-    This memory
-    storage
+    This memory block helps manage conversation histories with a key-value
+    storage backend, either provided by the user or using a default
     in-memory storage. It offers a windowed approach to retrieving chat
     histories, allowing users to specify how many recent messages they'd
     like to fetch.
 
-    `ChatHistoryMemory` requires messages to be stored with certain
-    metadata (e.g., `role_at_backend`) to maintain consistency and validate
-    the chat history.
-
     Args:
-        context_creator (BaseContextCreator): A context creator contianing
-            the context limit and the message pruning strategy.
         storage (BaseKeyValueStorage, optional): A storage mechanism for
            storing chat history. If `None`, an :obj:`InMemoryKeyValueStorage`
            will be used. (default: :obj:`None`)
-
-
-
+        keep_rate (float, optional): In historical messages, the score of the
+            last message is 1.0, and with each step taken backward, the score
+            of the message is multiplied by the `keep_rate`. Higher `keep_rate`
+            leads to high possiblity to keep history messages during context
+            creation.
     """
 
     def __init__(
         self,
-        context_creator: BaseContextCreator,
         storage: Optional[BaseKeyValueStorage] = None,
-
+        keep_rate: float = 0.9,
     ) -> None:
-
+        if keep_rate > 1 or keep_rate < 0:
+            raise ValueError("`keep_rate` should be in [0,1]")
         self.storage = storage or InMemoryKeyValueStorage()
-        self.
+        self.keep_rate = keep_rate
 
-    def
-
+    def retrieve(
+        self,
+        window_size: Optional[int] = None,
+    ) -> List[ContextRecord]:
+        r"""Retrieves records with a proper size for the agent from the memory
         based on the window size or fetches the entire chat history if no
         window size is specified.
 
+        Args:
+            window_size (int, optional): Specifies the number of recent chat
+                messages to retrieve. If not provided, the entire chat history
+                will be retrieved. (default: :obj:`None`)
+
         Returns:
-
-            context in OpenAIMessage format and the total token count.
-        Raises:
-            ValueError: If the memory is empty or if the first message in the
-                memory is not a system message.
+            List[ContextRecord]: A list of retrieved records.
         """
         record_dicts = self.storage.load()
         if len(record_dicts) == 0:
-
+            warnings.warn("The `ChatHistoryMemory` is empty.")
+            return list()
 
         chat_records: List[MemoryRecord] = []
-        truncate_idx = -
+        truncate_idx = -window_size if window_size is not None else 0
         for record_dict in record_dicts[truncate_idx:]:
             chat_records.append(MemoryRecord.from_dict(record_dict))
 
@@ -86,11 +87,11 @@ class ChatHistoryMemory(BaseMemory):
                 output_records.append(ContextRecord(record, 1.0))
             else:
                 # Other messages' score drops down gradually
-                score *=
+                score *= self.keep_rate
                 output_records.append(ContextRecord(record, score))
 
         output_records.reverse()
-        return
+        return output_records
 
     def write_records(self, records: List[MemoryRecord]) -> None:
         r"""Writes memory records to the memory. Additionally, performs
@@ -106,6 +107,5 @@ class ChatHistoryMemory(BaseMemory):
         self.storage.save(stored_records)
 
     def clear(self) -> None:
-        r"""Clears all chat messages from the memory.
-        """
+        r"""Clears all chat messages from the memory."""
         self.storage.clear()
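The practical change: ChatHistoryBlock no longer owns a context creator or emits OpenAI messages itself; it only scores raw records, decaying older messages by keep_rate, and leaves context assembly to an AgentMemory plus a BaseContextCreator. A rough usage sketch follows; it assumes the BaseMessage.make_user_message / make_assistant_message helpers and the OpenAIBackendRole.USER / ASSISTANT members found elsewhere in the package.

from camel.memories.blocks import ChatHistoryBlock
from camel.memories.records import MemoryRecord
from camel.messages import BaseMessage
from camel.types import OpenAIBackendRole

block = ChatHistoryBlock(keep_rate=0.9)  # default in-memory key-value storage

# Append two turns of conversation as memory records.
block.write_records([
    MemoryRecord(
        message=BaseMessage.make_user_message(
            role_name="user", content="What is CAMEL?"
        ),
        role_at_backend=OpenAIBackendRole.USER,
    ),
    MemoryRecord(
        message=BaseMessage.make_assistant_message(
            role_name="assistant", content="A multi-agent framework."
        ),
        role_at_backend=OpenAIBackendRole.ASSISTANT,
    ),
])

# Fetch the most recent records; scores decay by `keep_rate` per step back.
for ctx in block.retrieve(window_size=10):
    print(ctx.score, ctx.memory_record.message.content)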
camel/memories/blocks/vectordb_block.py
ADDED
@@ -0,0 +1,101 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+
+from typing import List, Optional
+
+from camel.embeddings import BaseEmbedding, OpenAIEmbedding
+from camel.memories.base import MemoryBlock
+from camel.memories.records import ContextRecord, MemoryRecord
+from camel.storages.vectordb_storages import (
+    BaseVectorStorage,
+    QdrantStorage,
+    VectorDBQuery,
+    VectorRecord,
+)
+
+
+class VectorDBBlock(MemoryBlock):
+    r"""An implementation of the :obj:`MemoryBlock` abstract base class for
+    maintaining and retrieving information using vector embeddings within a
+    vector database.
+
+    Args:
+        storage (Optional[BaseVectorStorage], optional): The storage mechanism
+            for the vector database. Defaults to in-memory :obj:`Qdrant` if not
+            provided. (default: :obj:`None`)
+        embedding (Optional[BaseEmbedding], optional): Embedding mechanism to
+            convert chat messages into vector representations. Defaults to
+            :obj:`OpenAiEmbedding` if not provided. (default: :obj:`None`)
+    """
+
+    def __init__(
+        self,
+        storage: Optional[BaseVectorStorage] = None,
+        embedding: Optional[BaseEmbedding] = None,
+    ) -> None:
+        self.embedding = embedding or OpenAIEmbedding()
+        self.vector_dim = self.embedding.get_output_dim()
+        self.storage = storage or QdrantStorage(vector_dim=self.vector_dim)
+
+    def retrieve(
+        self,
+        keyword: str,
+        limit: int = 3,
+    ) -> List[ContextRecord]:
+        r"""Retrieves similar records from the vector database based on the
+        content of the keyword.
+
+        Args:
+            keyword (str): This string will be converted into a vector
+                representation to query the database.
+            limit (int, optional): The maximum number of similar messages to
+                retrieve. (default: :obj:`3`).
+
+        Returns:
+            List[ContextRecord]: A list of memory records retrieved from the
+                vector database based on similarity to :obj:`current_state`.
+        """
+        query_vector = self.embedding.embed(keyword)
+        results = self.storage.query(VectorDBQuery(query_vector, top_k=limit))
+        return [
+            ContextRecord(
+                memory_record=MemoryRecord.from_dict(result.record.payload),
+                score=result.similarity,
+            )
+            for result in results
+            if result.record.payload is not None
+        ]
+
+    def write_records(self, records: List[MemoryRecord]) -> None:
+        """
+        Converts the provided chat messages into vector representations and
+        writes them to the vector database.
+
+        Args:
+            records (List[MemoryRecord]): Memory records to be added to the
+                memory.
+        """
+        v_records = [
+            VectorRecord(
+                vector=self.embedding.embed(record.message.content),
+                payload=record.to_dict(),
+                id=str(record.uuid),
+            )
+            for record in records
+        ]
+        self.storage.add(v_records)
+
+    def clear(self) -> None:
+        r"""Removes all records from the vector database memory."""
+        self.storage.clear()
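A short sketch of the new block on its own. The defaults are not free-standing: OpenAIEmbedding needs an OPENAI_API_KEY and QdrantStorage builds an in-memory Qdrant collection, so treat this as illustrative rather than turnkey; the BaseMessage helper and the enum member are the same assumptions as in the previous example.

from camel.memories.blocks import VectorDBBlock
from camel.memories.records import MemoryRecord
from camel.messages import BaseMessage
from camel.types import OpenAIBackendRole

block = VectorDBBlock()  # OpenAIEmbedding + in-memory QdrantStorage by default

# Each record is embedded from `message.content` and stored with its payload.
block.write_records([
    MemoryRecord(
        message=BaseMessage.make_assistant_message(
            role_name="assistant",
            content="Qdrant is the default vector store for this block.",
        ),
        role_at_backend=OpenAIBackendRole.ASSISTANT,
    ),
])

# The keyword is embedded and matched against stored vectors by similarity.
for ctx in block.retrieve("default vector store", limit=3):
    print(round(ctx.score, 3), ctx.memory_record.message.content)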
camel/memories/context_creators/__init__.py
CHANGED
@@ -12,7 +12,8 @@
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 
-from .base import BaseContextCreator
 from .score_based import ScoreBasedContextCreator
 
-__all__ = [
+__all__ = [
+    'ScoreBasedContextCreator',
+]
camel/memories/context_creators/score_based.py
CHANGED
@@ -14,8 +14,8 @@
 from dataclasses import dataclass
 from typing import List, Tuple
 
-from camel.memories import
-from camel.memories.
+from camel.memories.base import BaseContextCreator
+from camel.memories.records import ContextRecord
 from camel.messages import OpenAIMessage
 from camel.utils import BaseTokenCounter
 
@@ -43,8 +43,9 @@ class ScoreBasedContextCreator(BaseContextCreator):
             generated context.
     """
 
-    def __init__(
-
+    def __init__(
+        self, token_counter: BaseTokenCounter, token_limit: int
+    ) -> None:
         self._token_counter = token_counter
         self._token_limit = token_limit
 
@@ -79,14 +80,22 @@ class ScoreBasedContextCreator(BaseContextCreator):
             RuntimeError: If it's impossible to create a valid context without
                 exceeding the token limit.
         """
-
-
-
-
-
-
-
-
+        # Create unique context units list
+        uuid_set = set()
+        context_units = []
+        for idx, record in enumerate(records):
+            if record.memory_record.uuid not in uuid_set:
+                uuid_set.add(record.memory_record.uuid)
+                context_units.append(
+                    _ContextUnit(
+                        idx,
+                        record,
+                        self.token_counter.count_tokens_from_messages(
+                            [record.memory_record.to_openai_message()]
+                        ),
+                    )
+                )
+
         # TODO: optimize the process, may give information back to memory
 
         # If not exceed token limit, simply return
@@ -95,27 +104,30 @@ class ScoreBasedContextCreator(BaseContextCreator):
             return self._create_output(context_units)
 
         # Sort by score
-        context_units = sorted(
-
+        context_units = sorted(
+            context_units, key=lambda unit: unit.record.score
+        )
 
-        # Remove least score messages until total token number is smaller
+        # Remove the least score messages until total token number is smaller
         # than token limit
         truncate_idx = None
         for i, unit in enumerate(context_units):
             if unit.record.score == 1:
                 raise RuntimeError(
-                    "Cannot create context: exceed token limit.", total_tokens
+                    "Cannot create context: exceed token limit.", total_tokens
+                )
             total_tokens -= unit.num_tokens
             if total_tokens <= self.token_limit:
                 truncate_idx = i
                 break
         if truncate_idx is None:
-            raise RuntimeError(
-
-
+            raise RuntimeError(
+                "Cannot create context: exceed token limit.", total_tokens
+            )
+        return self._create_output(context_units[truncate_idx + 1 :])
 
     def _create_output(
-
+        self, context_units: List[_ContextUnit]
     ) -> Tuple[List[OpenAIMessage], int]:
         r"""Helper method to generate output from context units.
 
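In short, the creator deduplicates records by UUID, returns everything when it already fits, and otherwise drops the lowest-scoring units until the budget is met, refusing to evict records with score 1.0. A hedged sketch of wiring it up; OpenAITokenCounter and ModelType.GPT_3_5_TURBO are assumed to be available from camel.utils and camel.types respectively, and any BaseTokenCounter implementation would do.

from camel.memories.blocks import ChatHistoryBlock
from camel.memories.context_creators import ScoreBasedContextCreator
from camel.types import ModelType
from camel.utils import OpenAITokenCounter  # assumed token counter implementation

context_creator = ScoreBasedContextCreator(
    token_counter=OpenAITokenCounter(ModelType.GPT_3_5_TURBO),
    token_limit=1024,
)

block = ChatHistoryBlock()
# ... block.write_records(...) as in the earlier sketches ...

# Scored records go in; OpenAI-style messages within the token budget come out.
messages, num_tokens = context_creator.create_context(block.retrieve())
print(num_tokens, len(messages))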
camel/memories/records.py
CHANGED
@@ -37,6 +37,7 @@ class MemoryRecord:
             key-value pairs that provide more information. If not given, it
             will be an empty `Dict`.
     """
+
     message: BaseMessage
     role_at_backend: OpenAIBackendRole
     uuid: UUID = field(default_factory=uuid4)
@@ -44,7 +45,7 @@ class MemoryRecord:
 
     _MESSAGE_TYPES: ClassVar[dict] = {
         "BaseMessage": BaseMessage,
-        "FunctionCallingMessage": FunctionCallingMessage
+        "FunctionCallingMessage": FunctionCallingMessage,
     }
 
     @classmethod
@@ -73,10 +74,10 @@ class MemoryRecord:
             "uuid": str(self.uuid),
             "message": {
                 "__class__": self.message.__class__.__name__,
-                **asdict(self.message)
+                **asdict(self.message),
             },
             "role_at_backend": self.role_at_backend,
-            "extra_info": self.extra_info
+            "extra_info": self.extra_info,
         }
 
     def to_openai_message(self) -> OpenAIMessage:
@@ -86,7 +87,7 @@ class MemoryRecord:
 
 @dataclass(frozen=True)
 class ContextRecord:
-    r"""The result of memory retrieving.
-
+    r"""The result of memory retrieving."""
+
     memory_record: MemoryRecord
     score: float
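These are formatting-only changes (trailing commas, a collapsed docstring), but they touch the serialization path both memory blocks rely on: ChatHistoryBlock persists MemoryRecord dicts in key-value storage and VectorDBBlock stores them as vector payloads. A small round-trip illustration, using the same assumed BaseMessage helper as above:

from camel.memories.records import MemoryRecord
from camel.messages import BaseMessage
from camel.types import OpenAIBackendRole

record = MemoryRecord(
    message=BaseMessage.make_user_message(role_name="user", content="hello"),
    role_at_backend=OpenAIBackendRole.USER,
)

# Serialize to the plain-dict storage format and restore it.
data = record.to_dict()
restored = MemoryRecord.from_dict(data)
assert str(restored.uuid) == data["uuid"]

# Project into the OpenAI chat format used when building model context.
print(record.to_openai_message())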
camel/messages/__init__.py
CHANGED
@@ -12,11 +12,11 @@
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 from camel.types import (
-    ChatCompletionSystemMessageParam,
     ChatCompletionAssistantMessageParam,
-    ChatCompletionUserMessageParam,
     ChatCompletionFunctionMessageParam,
     ChatCompletionMessageParam,
+    ChatCompletionSystemMessageParam,
+    ChatCompletionUserMessageParam,
 )
 
 OpenAISystemMessage = ChatCompletionSystemMessageParam