camel-ai 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic. Click here for more details.
- camel/__init__.py +30 -0
- camel/agents/__init__.py +40 -0
- camel/agents/base.py +29 -0
- camel/agents/chat_agent.py +539 -0
- camel/agents/critic_agent.py +179 -0
- camel/agents/embodied_agent.py +138 -0
- camel/agents/role_assignment_agent.py +117 -0
- camel/agents/task_agent.py +382 -0
- camel/agents/tool_agents/__init__.py +20 -0
- camel/agents/tool_agents/base.py +40 -0
- camel/agents/tool_agents/hugging_face_tool_agent.py +203 -0
- camel/configs.py +159 -0
- camel/embeddings/__init__.py +20 -0
- camel/embeddings/base.py +65 -0
- camel/embeddings/openai_embedding.py +74 -0
- camel/functions/__init__.py +27 -0
- camel/functions/base_io_functions.py +261 -0
- camel/functions/math_functions.py +61 -0
- camel/functions/openai_function.py +88 -0
- camel/functions/search_functions.py +309 -0
- camel/functions/unstructured_io_fuctions.py +616 -0
- camel/functions/weather_functions.py +136 -0
- camel/generators.py +263 -0
- camel/human.py +130 -0
- camel/memories/__init__.py +28 -0
- camel/memories/base.py +75 -0
- camel/memories/chat_history_memory.py +111 -0
- camel/memories/context_creators/__init__.py +18 -0
- camel/memories/context_creators/base.py +72 -0
- camel/memories/context_creators/score_based.py +130 -0
- camel/memories/records.py +92 -0
- camel/messages/__init__.py +38 -0
- camel/messages/base.py +223 -0
- camel/messages/func_message.py +106 -0
- camel/models/__init__.py +26 -0
- camel/models/base_model.py +110 -0
- camel/models/model_factory.py +59 -0
- camel/models/open_source_model.py +144 -0
- camel/models/openai_model.py +103 -0
- camel/models/stub_model.py +106 -0
- camel/prompts/__init__.py +38 -0
- camel/prompts/ai_society.py +121 -0
- camel/prompts/base.py +227 -0
- camel/prompts/code.py +111 -0
- camel/prompts/evaluation.py +40 -0
- camel/prompts/misalignment.py +84 -0
- camel/prompts/prompt_templates.py +117 -0
- camel/prompts/role_description_prompt_template.py +53 -0
- camel/prompts/solution_extraction.py +44 -0
- camel/prompts/task_prompt_template.py +56 -0
- camel/prompts/translation.py +42 -0
- camel/responses/__init__.py +18 -0
- camel/responses/agent_responses.py +42 -0
- camel/societies/__init__.py +20 -0
- camel/societies/babyagi_playing.py +254 -0
- camel/societies/role_playing.py +456 -0
- camel/storages/__init__.py +23 -0
- camel/storages/key_value_storages/__init__.py +23 -0
- camel/storages/key_value_storages/base.py +57 -0
- camel/storages/key_value_storages/in_memory.py +51 -0
- camel/storages/key_value_storages/json.py +97 -0
- camel/terminators/__init__.py +23 -0
- camel/terminators/base.py +44 -0
- camel/terminators/response_terminator.py +118 -0
- camel/terminators/token_limit_terminator.py +55 -0
- camel/types/__init__.py +54 -0
- camel/types/enums.py +176 -0
- camel/types/openai_types.py +39 -0
- camel/utils/__init__.py +47 -0
- camel/utils/commons.py +243 -0
- camel/utils/python_interpreter.py +435 -0
- camel/utils/token_counting.py +220 -0
- camel_ai-0.1.1.dist-info/METADATA +311 -0
- camel_ai-0.1.1.dist-info/RECORD +75 -0
- camel_ai-0.1.1.dist-info/WHEEL +4 -0
|
@@ -0,0 +1,456 @@
|
|
|
1
|
+
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
|
|
2
|
+
# Licensed under the Apache License, Version 2.0 (the “License”);
|
|
3
|
+
# you may not use this file except in compliance with the License.
|
|
4
|
+
# You may obtain a copy of the License at
|
|
5
|
+
#
|
|
6
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
|
+
#
|
|
8
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
+
# distributed under the License is distributed on an “AS IS” BASIS,
|
|
10
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
|
+
# See the License for the specific language governing permissions and
|
|
12
|
+
# limitations under the License.
|
|
13
|
+
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
|
|
14
|
+
from typing import Dict, List, Optional, Sequence, Tuple, Union
|
|
15
|
+
|
|
16
|
+
from camel.agents import (
|
|
17
|
+
ChatAgent,
|
|
18
|
+
CriticAgent,
|
|
19
|
+
TaskPlannerAgent,
|
|
20
|
+
TaskSpecifyAgent,
|
|
21
|
+
)
|
|
22
|
+
from camel.generators import SystemMessageGenerator
|
|
23
|
+
from camel.human import Human
|
|
24
|
+
from camel.messages import BaseMessage
|
|
25
|
+
from camel.prompts import TextPrompt
|
|
26
|
+
from camel.responses import ChatAgentResponse
|
|
27
|
+
from camel.types import ModelType, RoleType, TaskType
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class RolePlaying:
    r"""Role playing between two agents.

    Args:
        assistant_role_name (str): The name of the role played by the
            assistant.
        user_role_name (str): The name of the role played by the user.
        critic_role_name (str): The name of the role played by the critic.
            Role name with :obj:`"human"` will set critic as a :obj:`Human`
            agent, else will create a :obj:`CriticAgent`.
            (default: :obj:`"critic"`)
        task_prompt (str, optional): A prompt for the task to be performed.
            (default: :obj:`""`)
        with_task_specify (bool, optional): Whether to use a task specify
            agent. (default: :obj:`True`)
        with_task_planner (bool, optional): Whether to use a task planner
            agent. (default: :obj:`False`)
        with_critic_in_the_loop (bool, optional): Whether to include a critic
            in the loop. (default: :obj:`False`)
        critic_criteria (str, optional): Critic criteria for the critic agent.
            If not specified, set the criteria to improve task performance.
        model_type (ModelType, optional): Model type that will be used for
            role playing. If specified, it will override the model in all
            agents. (default: :obj:`None`)
        task_type (TaskType, optional): The type of task to perform.
            (default: :obj:`TaskType.AI_SOCIETY`)
        assistant_agent_kwargs (Dict, optional): Additional arguments to pass
            to the assistant agent. (default: :obj:`None`)
        user_agent_kwargs (Dict, optional): Additional arguments to pass to
            the user agent. (default: :obj:`None`)
        task_specify_agent_kwargs (Dict, optional): Additional arguments to
            pass to the task specify agent. (default: :obj:`None`)
        task_planner_agent_kwargs (Dict, optional): Additional arguments to
            pass to the task planner agent. (default: :obj:`None`)
        critic_kwargs (Dict, optional): Additional arguments to pass to the
            critic. (default: :obj:`None`)
        sys_msg_generator_kwargs (Dict, optional): Additional arguments to
            pass to the system message generator. (default: :obj:`None`)
        extend_sys_msg_meta_dicts (List[Dict], optional): A list of dicts to
            extend the system message meta dicts with. (default: :obj:`None`)
        extend_task_specify_meta_dict (Dict, optional): A dict to extend the
            task specify meta dict with. (default: :obj:`None`)
        output_language (str, optional): The language to be output by the
            agents. (default: :obj:`None`)
    """

    def __init__(
        self,
        assistant_role_name: str,
        user_role_name: str,
        *,
        critic_role_name: str = "critic",
        task_prompt: str = "",
        with_task_specify: bool = True,
        with_task_planner: bool = False,
        with_critic_in_the_loop: bool = False,
        critic_criteria: Optional[str] = None,
        model_type: Optional[ModelType] = None,
        task_type: TaskType = TaskType.AI_SOCIETY,
        assistant_agent_kwargs: Optional[Dict] = None,
        user_agent_kwargs: Optional[Dict] = None,
        task_specify_agent_kwargs: Optional[Dict] = None,
        task_planner_agent_kwargs: Optional[Dict] = None,
        critic_kwargs: Optional[Dict] = None,
        sys_msg_generator_kwargs: Optional[Dict] = None,
        extend_sys_msg_meta_dicts: Optional[List[Dict]] = None,
        extend_task_specify_meta_dict: Optional[Dict] = None,
        output_language: Optional[str] = None,
    ) -> None:
        self.with_task_specify = with_task_specify
        self.with_task_planner = with_task_planner
        self.with_critic_in_the_loop = with_critic_in_the_loop
        self.model_type = model_type
        self.task_type = task_type
        self.task_prompt = task_prompt

        # Optionally rewrite the task prompt with a task specify agent.
        self.specified_task_prompt: Optional[TextPrompt] = None
        self.init_specified_task_prompt(assistant_role_name, user_role_name,
                                        task_specify_agent_kwargs,
                                        extend_task_specify_meta_dict,
                                        output_language)

        # Optionally append a plan to the (possibly specified) task prompt.
        self.planned_task_prompt: Optional[TextPrompt] = None
        self.init_planned_task_prompt(task_planner_agent_kwargs,
                                      output_language)

        sys_msg_generator = SystemMessageGenerator(
            task_type=self.task_type, **(sys_msg_generator_kwargs or {}))

        (init_assistant_sys_msg, init_user_sys_msg,
         sys_msg_meta_dicts) = self.get_sys_message_info(
             assistant_role_name, user_role_name, sys_msg_generator,
             extend_sys_msg_meta_dicts)

        self.assistant_agent: ChatAgent
        self.user_agent: ChatAgent
        self.assistant_sys_msg: BaseMessage
        self.user_sys_msg: BaseMessage
        self.init_agents(
            init_assistant_sys_msg,
            assistant_agent_kwargs,
            init_user_sys_msg,
            user_agent_kwargs,
            output_language,
        )
        self.critic: Optional[Union[CriticAgent, Human]] = None
        self.critic_sys_msg: Optional[BaseMessage] = None
        self.init_critic(critic_role_name, critic_criteria, critic_kwargs,
                         sys_msg_generator, sys_msg_meta_dicts)

    def _inject_model_type(self, agent_kwargs: Optional[Dict]) -> Dict:
        r"""Return a copy of ``agent_kwargs`` with ``model_type`` overridden
        by :obj:`self.model_type` when the latter is set.

        Copying (rather than mutating in place, as earlier revisions did)
        keeps caller-supplied kwargs dictionaries untouched.

        Args:
            agent_kwargs (Dict, optional): Keyword arguments destined for an
                agent constructor, or :obj:`None`.

        Returns:
            Dict: A new kwargs dict, safe to splat into a constructor.
        """
        agent_kwargs = dict(agent_kwargs or {})
        if self.model_type is not None:
            agent_kwargs["model_type"] = self.model_type
        return agent_kwargs

    def init_specified_task_prompt(
            self, assistant_role_name: str, user_role_name: str,
            task_specify_agent_kwargs: Optional[Dict],
            extend_task_specify_meta_dict: Optional[Dict],
            output_language: Optional[str]):
        r"""Use a task specify agent to generate a specified task prompt.
        Generated specified task prompt will be used to replace original
        task prompt. If there is no task specify agent, specified task
        prompt will not be generated.

        Args:
            assistant_role_name (str): The name of the role played by the
                assistant.
            user_role_name (str): The name of the role played by the user.
            task_specify_agent_kwargs (Dict, optional): Additional arguments
                to pass to the task specify agent.
            extend_task_specify_meta_dict (Dict, optional): A dict to extend
                the task specify meta dict with.
            output_language (str, optional): The language to be output by the
                agents.
        """
        if not self.with_task_specify:
            return
        task_specify_meta_dict = dict()
        # These task types template the two role names into the prompt.
        if self.task_type in [TaskType.AI_SOCIETY, TaskType.MISALIGNMENT]:
            task_specify_meta_dict.update(
                dict(assistant_role=assistant_role_name,
                     user_role=user_role_name))
        task_specify_meta_dict.update(extend_task_specify_meta_dict or {})
        task_specify_agent = TaskSpecifyAgent(
            task_type=self.task_type,
            output_language=output_language,
            **self._inject_model_type(task_specify_agent_kwargs),
        )
        self.specified_task_prompt = task_specify_agent.run(
            self.task_prompt,
            meta_dict=task_specify_meta_dict,
        )
        # The specified prompt replaces the original from here on.
        self.task_prompt = self.specified_task_prompt

    def init_planned_task_prompt(self,
                                 task_planner_agent_kwargs: Optional[Dict],
                                 output_language: Optional[str]):
        r"""Use a task plan agent to append a planned task prompt to task
        prompt. The planned task prompt is generated based on the task
        prompt, which can be original task prompt or specified task prompt
        if available. If there is no task plan agent, planned task prompt
        will not be generated.

        Args:
            task_planner_agent_kwargs (Dict, optional): Additional arguments
                to pass to the task planner agent.
            output_language (str, optional): The language to be output by the
                agents.
        """
        if not self.with_task_planner:
            self.planned_task_prompt = None
            return
        task_planner_agent = TaskPlannerAgent(
            output_language=output_language,
            **self._inject_model_type(task_planner_agent_kwargs),
        )
        self.planned_task_prompt = task_planner_agent.run(self.task_prompt)
        # Append the plan below the (possibly specified) task prompt.
        self.task_prompt = (f"{self.task_prompt}\n"
                            f"{self.planned_task_prompt}")

    def get_sys_message_info(
        self,
        assistant_role_name: str,
        user_role_name: str,
        sys_msg_generator: SystemMessageGenerator,
        extend_sys_msg_meta_dicts: Optional[List[Dict]] = None,
    ) -> Tuple[BaseMessage, BaseMessage, List[Dict]]:
        r"""Get initial assistant and user system message with a list of
        system message meta dicts.

        Args:
            assistant_role_name (str): The name of the role played by the
                assistant.
            user_role_name (str): The name of the role played by the user.
            sys_msg_generator (SystemMessageGenerator): A system message
                generator for agents.
            extend_sys_msg_meta_dicts (List[Dict], optional): A list of dicts
                to extend the system message meta dicts with.

        Returns:
            A tuple containing a `BaseMessage` representing the assistant's
            initial system message, a `BaseMessage` representing the user's
            initial system message, and a list of system message meta dicts.
        """
        # One meta dict per agent (assistant, user).
        sys_msg_meta_dicts = [dict(task=self.task_prompt) for _ in range(2)]
        # These task types additionally template the role names.
        if (extend_sys_msg_meta_dicts is None and self.task_type in [
                TaskType.AI_SOCIETY,
                TaskType.MISALIGNMENT,
        ]):
            extend_sys_msg_meta_dicts = [
                dict(assistant_role=assistant_role_name,
                     user_role=user_role_name) for _ in range(2)
            ]

        if extend_sys_msg_meta_dicts is not None:
            sys_msg_meta_dicts = [{
                **sys_msg_meta_dict,
                **extend_sys_msg_meta_dict
            } for sys_msg_meta_dict, extend_sys_msg_meta_dict in zip(
                sys_msg_meta_dicts, extend_sys_msg_meta_dicts)]

        init_assistant_sys_msg, init_user_sys_msg = (
            sys_msg_generator.from_dicts(
                meta_dicts=sys_msg_meta_dicts,
                role_tuples=[
                    (assistant_role_name, RoleType.ASSISTANT),
                    (user_role_name, RoleType.USER),
                ],
            ))
        return init_assistant_sys_msg, init_user_sys_msg, sys_msg_meta_dicts

    def init_agents(
        self,
        init_assistant_sys_msg: BaseMessage,
        assistant_agent_kwargs: Optional[Dict],
        init_user_sys_msg: BaseMessage,
        user_agent_kwargs: Optional[Dict],
        output_language: Optional[str],
    ):
        r"""Initialize assistant and user agents with their system messages.

        Args:
            init_assistant_sys_msg (BaseMessage): Assistant agent's initial
                system message.
            assistant_agent_kwargs (Dict, optional): Additional arguments to
                pass to the assistant agent.
            init_user_sys_msg (BaseMessage): User agent's initial system
                message.
            user_agent_kwargs (Dict, optional): Additional arguments to
                pass to the user agent.
            output_language (str, optional): The language to be output by the
                agents.
        """
        self.assistant_agent = ChatAgent(
            init_assistant_sys_msg,
            output_language=output_language,
            **self._inject_model_type(assistant_agent_kwargs),
        )
        # Agents may post-process the system message; keep their version.
        self.assistant_sys_msg = self.assistant_agent.system_message

        self.user_agent = ChatAgent(
            init_user_sys_msg,
            output_language=output_language,
            **self._inject_model_type(user_agent_kwargs),
        )
        self.user_sys_msg = self.user_agent.system_message

    def init_critic(self, critic_role_name: str,
                    critic_criteria: Optional[str],
                    critic_kwargs: Optional[Dict],
                    sys_msg_generator: SystemMessageGenerator,
                    sys_msg_meta_dicts: List[Dict]):
        r"""Initialize critic agent. If critic role name is :obj:`"human"`,
        create a :obj:`Human` critic agent. Else, create a :obj:`CriticAgent`
        critic agent with specified critic criteria. If the critic criteria
        is not specified, set it to improve task performance.

        Args:
            critic_role_name (str): The name of the role played by the critic.
            critic_criteria (str, optional): Critic criteria for the
                critic agent. If not specified, set the criteria to
                improve task performance.
            critic_kwargs (Dict, optional): Additional arguments to
                pass to the critic.
            sys_msg_generator (SystemMessageGenerator): A system message
                generator for agents.
            sys_msg_meta_dicts (list): A list of system message meta dicts.
        """
        if not self.with_critic_in_the_loop:
            return
        if critic_role_name.lower() == "human":
            # A human critic takes no system message or model override.
            self.critic = Human(**(critic_kwargs or {}))
        else:
            critic_criteria = (critic_criteria
                               or "improving the task performance")
            critic_msg_meta_dict = dict(critic_role=critic_role_name,
                                        criteria=critic_criteria,
                                        **sys_msg_meta_dicts[0])
            self.critic_sys_msg = sys_msg_generator.from_dict(
                critic_msg_meta_dict,
                role_tuple=(critic_role_name, RoleType.CRITIC),
            )
            self.critic = CriticAgent(
                self.critic_sys_msg,
                **self._inject_model_type(critic_kwargs),
            )

    def init_chat(self) -> Tuple[BaseMessage, List[BaseMessage]]:
        r"""Initializes the chat by resetting both of the assistant and user
        agents, and sending the system messages again to the agents using
        chat messages. Returns the assistant's introductory message and the
        user's response messages.

        Returns:
            A tuple containing a `BaseMessage` representing the assistant's
            introductory message, and a list of `BaseMessage` representing
            the user's response messages.

        Raises:
            ValueError: If the assistant agent terminates unexpectedly while
                producing its first response.
        """
        self.assistant_agent.reset()
        self.user_agent.reset()

        # Send the system messages again to the agents using chat messages.
        # Note: the assistant's kickoff message is built from the USER's
        # system message (and vice versa) so each agent sees its
        # counterpart's instructions.
        assistant_msg = BaseMessage.make_assistant_message(
            role_name=self.assistant_sys_msg.role_name,
            content=(f"{self.user_sys_msg.content}. "
                     "Now start to give me instructions one by one. "
                     "Only reply with Instruction and Input."))
        user_msg = BaseMessage.make_user_message(
            role_name=self.user_sys_msg.role_name,
            content=f"{self.assistant_sys_msg.content}")
        assistant_response = self.assistant_agent.step(user_msg)
        if assistant_response.terminated or assistant_response.msgs is None:
            raise ValueError(f"Assistant agent terminated unexpectedly. "
                             f"Error info: {assistant_response.info}")

        return assistant_msg, assistant_response.msgs

    def reduce_message_options(
        self,
        messages: Sequence[BaseMessage],
    ) -> BaseMessage:
        r"""Processes a sequence of chat messages, returning the processed
        message. If multiple messages are provided and
        `with_critic_in_the_loop` is `False`, raises a `ValueError`.
        If no messages are provided, a `ValueError` will be raised.

        Args:
            messages: A sequence of `BaseMessage` objects to process.

        Returns:
            A single `BaseMessage` representing the processed message.
        """
        if len(messages) == 0:
            raise ValueError("No messages to process.")
        # Fix: the original message read "Got than one message" (missing
        # the word "more").
        if len(messages) > 1 and not self.with_critic_in_the_loop:
            raise ValueError("Got more than one message to process. "
                             f"Num of messages: {len(messages)}.")
        elif self.with_critic_in_the_loop and self.critic is not None:
            # Let the critic pick among the candidate messages.
            critic_response = self.critic.reduce_step(messages)
            processed_msg = critic_response.msg
        else:
            processed_msg = messages[0]

        return processed_msg

    def step(
        self,
        assistant_msg: BaseMessage,
    ) -> Tuple[ChatAgentResponse, ChatAgentResponse]:
        r"""Advances the conversation by taking a message from the assistant,
        processing it using the user agent, and then processing the resulting
        message using the assistant agent. Returns a tuple containing the
        resulting assistant message, whether the assistant agent terminated
        the conversation, and any additional assistant information, as well as
        a tuple containing the resulting user message, whether the user agent
        terminated the conversation, and any additional user information.

        Args:
            assistant_msg: A `BaseMessage` representing the message from the
                assistant.

        Returns:
            A tuple containing two ChatAgentResponse: the first struct contains
            the resulting assistant message, whether the assistant agent
            terminated the conversation, and any additional assistant
            information; the second struct contains the resulting user message,
            whether the user agent terminated the conversation, and any
            additional user information.
        """
        user_response = self.user_agent.step(assistant_msg)
        if user_response.terminated or user_response.msgs is None:
            # User side ended the chat; report an empty assistant response.
            return (ChatAgentResponse([], False, {}),
                    ChatAgentResponse([], user_response.terminated,
                                      user_response.info))
        user_msg = self.reduce_message_options(user_response.msgs)
        self.user_agent.record_message(user_msg)

        assistant_response = self.assistant_agent.step(user_msg)
        if assistant_response.terminated or assistant_response.msgs is None:
            # Assistant side ended; still return the user's chosen message.
            return (ChatAgentResponse([], assistant_response.terminated,
                                      assistant_response.info),
                    ChatAgentResponse([user_msg], False, user_response.info))
        assistant_msg = self.reduce_message_options(assistant_response.msgs)
        self.assistant_agent.record_message(assistant_msg)

        return (
            ChatAgentResponse([assistant_msg], assistant_response.terminated,
                              assistant_response.info),
            ChatAgentResponse([user_msg], user_response.terminated,
                              user_response.info),
        )
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
|
|
2
|
+
# Licensed under the Apache License, Version 2.0 (the “License”);
|
|
3
|
+
# you may not use this file except in compliance with the License.
|
|
4
|
+
# You may obtain a copy of the License at
|
|
5
|
+
#
|
|
6
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
|
+
#
|
|
8
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
+
# distributed under the License is distributed on an “AS IS” BASIS,
|
|
10
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
|
+
# See the License for the specific language governing permissions and
|
|
12
|
+
# limitations under the License.
|
|
13
|
+
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
|
|
14
|
+
|
|
15
|
+
# Re-export the key-value storage classes at the `camel.storages` package
# level so callers can write e.g. ``from camel.storages import JsonStorage``.
from .key_value_storages.base import BaseKeyValueStorage
from .key_value_storages.in_memory import InMemoryKeyValueStorage
from .key_value_storages.json import JsonStorage

# Public API of this package.
__all__ = [
    'BaseKeyValueStorage',
    'InMemoryKeyValueStorage',
    'JsonStorage',
]
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
|
|
2
|
+
# Licensed under the Apache License, Version 2.0 (the “License”);
|
|
3
|
+
# you may not use this file except in compliance with the License.
|
|
4
|
+
# You may obtain a copy of the License at
|
|
5
|
+
#
|
|
6
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
|
+
#
|
|
8
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
+
# distributed under the License is distributed on an “AS IS” BASIS,
|
|
10
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
|
+
# See the License for the specific language governing permissions and
|
|
12
|
+
# limitations under the License.
|
|
13
|
+
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
|
|
14
|
+
|
|
15
|
+
# Expose the abstract base class and the concrete key-value storage
# implementations (in-memory and JSON-file) from this sub-package.
from .base import BaseKeyValueStorage
from .in_memory import InMemoryKeyValueStorage
from .json import JsonStorage

# Public API of this sub-package.
__all__ = [
    'BaseKeyValueStorage',
    'InMemoryKeyValueStorage',
    'JsonStorage',
]
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
|
|
2
|
+
# Licensed under the Apache License, Version 2.0 (the “License”);
|
|
3
|
+
# you may not use this file except in compliance with the License.
|
|
4
|
+
# You may obtain a copy of the License at
|
|
5
|
+
#
|
|
6
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
|
+
#
|
|
8
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
+
# distributed under the License is distributed on an “AS IS” BASIS,
|
|
10
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
|
+
# See the License for the specific language governing permissions and
|
|
12
|
+
# limitations under the License.
|
|
13
|
+
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
|
|
14
|
+
|
|
15
|
+
from abc import ABC, abstractmethod
|
|
16
|
+
from typing import Any, Dict, List
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class BaseKeyValueStorage(ABC):
    r"""Abstract interface for key-value record storage systems. Offers a
    uniform way to save, load, and clear batches of data records without any
    loss of information.

    The class interacts with callers purely through Python dictionaries, and
    is designed as the foundation for multiple concrete backends — among
    them JSON file storage, NoSQL databases such as MongoDB and Redis, and
    plain in-memory Python dictionaries.
    """

    @abstractmethod
    def save(self, records: List[Dict[str, Any]]) -> None:
        r"""Persists a batch of records in the storage system.

        Args:
            records (List[Dict[str, Any]]): A list of dictionaries, each one
                representing a single record to be stored.
        """

    @abstractmethod
    def load(self) -> List[Dict[str, Any]]:
        r"""Retrieves every record held by the storage system.

        Returns:
            List[Dict[str, Any]]: A list of dictionaries, one per stored
                record.
        """

    @abstractmethod
    def clear(self) -> None:
        r"""Deletes all records from the storage system."""
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
|
|
2
|
+
# Licensed under the Apache License, Version 2.0 (the “License”);
|
|
3
|
+
# you may not use this file except in compliance with the License.
|
|
4
|
+
# You may obtain a copy of the License at
|
|
5
|
+
#
|
|
6
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
|
+
#
|
|
8
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
+
# distributed under the License is distributed on an “AS IS” BASIS,
|
|
10
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
|
+
# See the License for the specific language governing permissions and
|
|
12
|
+
# limitations under the License.
|
|
13
|
+
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
|
|
14
|
+
|
|
15
|
+
from copy import deepcopy
|
|
16
|
+
from typing import Any, Dict, List
|
|
17
|
+
|
|
18
|
+
from camel.storages.key_value_storages import BaseKeyValueStorage
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class InMemoryKeyValueStorage(BaseKeyValueStorage):
    r"""A concrete :obj:`BaseKeyValueStorage` backed by a plain Python list
    held in memory. Suited to temporary storage only: all records are lost
    when the process exits.
    """

    def __init__(self) -> None:
        # Records live solely in this list for the process lifetime.
        self.memory_list: List[Dict] = []

    def save(self, records: List[Dict[str, Any]]) -> None:
        r"""Persists a batch of records in the in-memory store.

        Args:
            records (List[Dict[str, Any]]): A list of dictionaries, each one
                representing a single record to be stored.
        """
        # Deep-copy each record so later mutation by the caller cannot
        # corrupt what was stored.
        self.memory_list += [deepcopy(record) for record in records]

    def load(self) -> List[Dict[str, Any]]:
        r"""Retrieves every record held by the in-memory store.

        Returns:
            List[Dict[str, Any]]: A list of dictionaries, one per stored
                record.
        """
        # Hand back deep copies so callers cannot mutate internal state.
        return [deepcopy(record) for record in self.memory_list]

    def clear(self) -> None:
        r"""Deletes all records from the in-memory store."""
        del self.memory_list[:]
|