camel-ai 0.1.1__py3-none-any.whl → 0.1.3__py3-none-any.whl

This diff shows the contents of publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.

Potentially problematic release.


Files changed (99)
  1. camel/__init__.py +1 -11
  2. camel/agents/__init__.py +5 -5
  3. camel/agents/chat_agent.py +124 -63
  4. camel/agents/critic_agent.py +28 -17
  5. camel/agents/deductive_reasoner_agent.py +235 -0
  6. camel/agents/embodied_agent.py +92 -40
  7. camel/agents/role_assignment_agent.py +27 -17
  8. camel/agents/task_agent.py +60 -34
  9. camel/agents/tool_agents/base.py +0 -1
  10. camel/agents/tool_agents/hugging_face_tool_agent.py +7 -4
  11. camel/configs.py +119 -7
  12. camel/embeddings/__init__.py +2 -0
  13. camel/embeddings/base.py +3 -2
  14. camel/embeddings/openai_embedding.py +3 -3
  15. camel/embeddings/sentence_transformers_embeddings.py +65 -0
  16. camel/functions/__init__.py +13 -3
  17. camel/functions/google_maps_function.py +335 -0
  18. camel/functions/math_functions.py +7 -7
  19. camel/functions/openai_function.py +344 -42
  20. camel/functions/search_functions.py +100 -35
  21. camel/functions/twitter_function.py +484 -0
  22. camel/functions/weather_functions.py +36 -23
  23. camel/generators.py +65 -46
  24. camel/human.py +17 -11
  25. camel/interpreters/__init__.py +25 -0
  26. camel/interpreters/base.py +49 -0
  27. camel/{utils/python_interpreter.py → interpreters/internal_python_interpreter.py} +129 -48
  28. camel/interpreters/interpreter_error.py +19 -0
  29. camel/interpreters/subprocess_interpreter.py +190 -0
  30. camel/loaders/__init__.py +22 -0
  31. camel/{functions/base_io_functions.py → loaders/base_io.py} +38 -35
  32. camel/{functions/unstructured_io_fuctions.py → loaders/unstructured_io.py} +199 -110
  33. camel/memories/__init__.py +17 -7
  34. camel/memories/agent_memories.py +156 -0
  35. camel/memories/base.py +97 -32
  36. camel/memories/blocks/__init__.py +21 -0
  37. camel/memories/{chat_history_memory.py → blocks/chat_history_block.py} +34 -34
  38. camel/memories/blocks/vectordb_block.py +101 -0
  39. camel/memories/context_creators/__init__.py +3 -2
  40. camel/memories/context_creators/score_based.py +32 -20
  41. camel/memories/records.py +6 -5
  42. camel/messages/__init__.py +2 -2
  43. camel/messages/base.py +99 -16
  44. camel/messages/func_message.py +7 -4
  45. camel/models/__init__.py +4 -2
  46. camel/models/anthropic_model.py +132 -0
  47. camel/models/base_model.py +3 -2
  48. camel/models/model_factory.py +10 -8
  49. camel/models/open_source_model.py +25 -13
  50. camel/models/openai_model.py +9 -10
  51. camel/models/stub_model.py +6 -5
  52. camel/prompts/__init__.py +7 -5
  53. camel/prompts/ai_society.py +21 -14
  54. camel/prompts/base.py +54 -47
  55. camel/prompts/code.py +22 -14
  56. camel/prompts/evaluation.py +8 -5
  57. camel/prompts/misalignment.py +26 -19
  58. camel/prompts/object_recognition.py +35 -0
  59. camel/prompts/prompt_templates.py +14 -8
  60. camel/prompts/role_description_prompt_template.py +16 -10
  61. camel/prompts/solution_extraction.py +9 -5
  62. camel/prompts/task_prompt_template.py +24 -21
  63. camel/prompts/translation.py +9 -5
  64. camel/responses/agent_responses.py +5 -2
  65. camel/retrievers/__init__.py +24 -0
  66. camel/retrievers/auto_retriever.py +319 -0
  67. camel/retrievers/base.py +64 -0
  68. camel/retrievers/bm25_retriever.py +149 -0
  69. camel/retrievers/vector_retriever.py +166 -0
  70. camel/societies/__init__.py +1 -1
  71. camel/societies/babyagi_playing.py +56 -32
  72. camel/societies/role_playing.py +188 -133
  73. camel/storages/__init__.py +18 -0
  74. camel/storages/graph_storages/__init__.py +23 -0
  75. camel/storages/graph_storages/base.py +82 -0
  76. camel/storages/graph_storages/graph_element.py +74 -0
  77. camel/storages/graph_storages/neo4j_graph.py +582 -0
  78. camel/storages/key_value_storages/base.py +1 -2
  79. camel/storages/key_value_storages/in_memory.py +1 -2
  80. camel/storages/key_value_storages/json.py +8 -13
  81. camel/storages/vectordb_storages/__init__.py +33 -0
  82. camel/storages/vectordb_storages/base.py +202 -0
  83. camel/storages/vectordb_storages/milvus.py +396 -0
  84. camel/storages/vectordb_storages/qdrant.py +371 -0
  85. camel/terminators/__init__.py +1 -1
  86. camel/terminators/base.py +2 -3
  87. camel/terminators/response_terminator.py +21 -12
  88. camel/terminators/token_limit_terminator.py +5 -3
  89. camel/types/__init__.py +12 -6
  90. camel/types/enums.py +86 -13
  91. camel/types/openai_types.py +10 -5
  92. camel/utils/__init__.py +18 -13
  93. camel/utils/commons.py +242 -81
  94. camel/utils/token_counting.py +135 -15
  95. {camel_ai-0.1.1.dist-info → camel_ai-0.1.3.dist-info}/METADATA +116 -74
  96. camel_ai-0.1.3.dist-info/RECORD +101 -0
  97. {camel_ai-0.1.1.dist-info → camel_ai-0.1.3.dist-info}/WHEEL +1 -1
  98. camel/memories/context_creators/base.py +0 -72
  99. camel_ai-0.1.1.dist-info/RECORD +0 -75
camel/agents/deductive_reasoner_agent.py (new file)
@@ -0,0 +1,235 @@
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ import re
+ from typing import Dict, List, Optional, Union
+
+ from camel.agents.chat_agent import ChatAgent
+ from camel.configs import BaseConfig
+ from camel.messages import BaseMessage
+ from camel.prompts import TextPrompt
+ from camel.types import ModelType, RoleType
+
+
+ class DeductiveReasonerAgent(ChatAgent):
+     r"""An agent responsible for deductive reasoning. Model of deductive
+     reasoning:
+         - L: A ⊕ C -> q * B
+         - A represents the known starting state.
+         - B represents the known target state.
+         - C represents the conditions required to transition from A to B.
+         - Q represents the quality or effectiveness of the transition from
+           A to B.
+         - L represents the path or process from A to B.
+
+     Args:
+         model_type (ModelType, optional): The type of model to use for the
+             agent. (default: :obj:`None`)
+         model_config (BaseConfig, optional): The configuration for the model.
+             (default: :obj:`None`)
+     """
+
+     def __init__(
+         self,
+         model_type: Optional[ModelType] = None,
+         model_config: Optional[BaseConfig] = None,
+     ) -> None:
+         system_message = BaseMessage(
+             role_name="Insight Agent",
+             role_type=RoleType.ASSISTANT,
+             meta_dict=None,
+             content="You assign roles based on tasks.",
+         )
+         super().__init__(system_message, model_type, model_config)
+
+     def deduce_conditions_and_quality(
+         self,
+         starting_state: str,
+         target_state: str,
+         role_descriptions_dict: Optional[Dict[str, str]] = None,
+     ) -> Dict[str, Union[List[str], Dict[str, str]]]:
+         r"""Derives the conditions and quality from the starting state and the
+         target state based on the model of the deductive reasoning and the
+         knowledge base. It can optionally consider the roles involved in the
+         scenario, which allows tailoring the output more closely to the AI
+         agent's environment.
+
+         Args:
+             starting_state (str): The initial or starting state from which
+                 conditions are deduced.
+             target_state (str): The target state of the task.
+             role_descriptions_dict (Optional[Dict[str, str]], optional): The
+                 descriptions of the roles. (default: :obj:`None`)
+             role_descriptions_dict (Optional[Dict[str, str]], optional): A
+                 dictionary describing the roles involved in the scenario. This
+                 is optional and can be used to provide a context for the
+                 CAMEL's role-playing, enabling the generation of more relevant
+                 and tailored conditions and quality assessments. This could be
+                 generated using a `RoleAssignmentAgent()` or defined manually
+                 by the user.
+
+         Returns:
+             Dict[str, Union[List[str], Dict[str, str]]]: A dictionary with the
+                 extracted data from the message. The dictionary contains three
+                 keys:
+                 - 'conditions': A list where each key is a condition ID and
+                     each value is the corresponding condition text.
+                 - 'labels': A list of label strings extracted from the message.
+                 - 'quality': A string of quality assessment strings extracted
+                     from the message.
+         """
+         self.reset()
+
+         deduce_prompt = """You are a deductive reasoner. You are tasked to complete the TASK based on the THOUGHT OF DEDUCTIVE REASONING, the STARTING STATE A and the TARGET STATE B. You are given the CONTEXT CONTENT to help you complete the TASK.
+ Your answer MUST strictly adhere to the structure of ANSWER TEMPLATE, ONLY fill in the BLANKs, and DO NOT alter or modify any other part of the template
+
+ ===== MODELING OF DEDUCTIVE REASONING =====
+ You are tasked with understanding a mathematical model based on the components ${A, B, C, Q, L}$. In this model: ``L: A ⊕ C -> q * B``.
+ - $A$ represents the known starting state.
+ - $B$ represents the known target state.
+ - $C$ represents the conditions required to transition from $A$ to $B$.
+ - $Q$ represents the quality or effectiveness of the transition from $A$ to $B$.
+ - $L$ represents the path or process from $A$ to $B$.
+
+ ===== THOUGHT OF DEDUCTIVE REASONING =====
+ 1. Define the Parameters of A and B:
+ - Characterization: Before delving into transitions, thoroughly understand the nature and boundaries of both $A$ and $B$. This includes the type, properties, constraints, and possible interactions between the two.
+ - Contrast and Compare: Highlight the similarities and differences between $A$ and $B$. This comparative analysis will give an insight into what needs changing and what remains constant.
+ 2. Historical & Empirical Analysis:
+ - Previous Transitions according to the Knowledge Base of GPT: (if applicable) Extract conditions and patterns from the historical instances where a similar transition from a state comparable to $A$ moved towards $B$.
+ - Scientific Principles: (if applicable) Consider the underlying scientific principles governing or related to the states and their transition. For example, if $A$ and $B$ are physical states, laws of physics might apply.
+ 3. Logical Deduction of Conditions ($C$):
+ - Direct Path Analysis: What are the immediate and direct conditions required to move from $A$ to $B$?
+ - Intermediate States: Are there states between $A$ and $B$ that must be transversed or can be used to make the transition smoother or more efficient? If yes, what is the content?
+ - Constraints & Limitations: Identify potential barriers or restrictions in moving from $A$ to $B$. These can be external (e.g., environmental factors) or internal (properties of $A$ or $B$).
+ - Resource and Information Analysis: What resources and information are required for the transition? This could be time, entity, factor, code language, software platform, unknowns, etc.
+ - External Influences: Consider socio-economic, political, or environmental factors (if applicable) that could influence the transition conditions.
+ - Creative/Heuristic Reasoning: Open your mind to multiple possible $C$'s, no matter how unconventional they might seem. Utilize analogies, metaphors, or brainstorming techniques to envision possible conditions or paths from $A$ to $B$.
+ - The conditions $C$ should be multiple but in one sentence. And each condition should be concerned with one aspect/entity.
+ 4. Entity/Label Recognition of Conditions ($C$):
+ - Identify and categorize entities of Conditions ($C$) such as the names, locations, dates, specific technical terms or contextual parameters that might be associated with events, innovations post-2022.
+ - The output of the entities/labels will be used as tags or labels for semantic similarity searches. The entities/labels may be the words, or phrases, each of them should contain valuable, high information entropy information, and should be independent.
+ - Ensure that the identified entities are formatted in a manner suitable for database indexing and retrieval. Organize the entities into categories, and combine the category with its instance into a continuous phrase, without using colons or other separators.
+ - Format these entities for database indexing: output the category rather than its instance/content into a continuous phrase. For example, instead of "Jan. 02", identify it as "Event time".
+ 5. Quality Assessment ($Q$):
+ - Efficiency: How efficient is the transition from $A$ to $B$, which measures the resources used versus the desired outcome?
+ - Effectiveness: Did the transition achieve the desired outcome or was the target state achieved as intended?
+ - Safety & Risks: Assess any risks associated with the transition and the measures to mitigate them.
+ - Feedback Mechanisms: Incorporate feedback loops to continuously monitor and adjust the quality of transition, making it more adaptive.
+ 6. Iterative Evaluation:
+ - Test & Refine: Based on the initially deduced conditions and assessed quality, iterate the process to refine and optimize the transition. This might involve tweaking conditions, employing different paths, or changing resources.
+ - Feedback Integration: Use feedback to make improvements and increase the quality of the transition.
+ 7. Real-world scenarios often present challenges that may not be captured by models and frameworks. While using the model, maintain an adaptive mindset:
+ - Scenario Exploration: Continuously imagine various possible scenarios, both positive and negative, to prepare for unexpected events.
+ - Flexibility: Be prepared to modify conditions ($C$) or alter the path/process ($L$) if unforeseen challenges arise.
+ - Feedback Integration: Rapidly integrate feedback from actual implementations to adjust the model's application, ensuring relevancy and effectiveness.
+
+ ===== TASK =====
+ Given the starting state $A$ and the target state $B$, assuming that a path $L$ always exists between $A$ and $B$, how can one deduce or identify the necessary conditions $C$ and the quality $Q$ of the transition?
+
+ ===== STARTING STATE $A$ =====
+ {starting_state}
+
+ ===== TARGET STATE $B$ =====
+ {target_state}
+
+ {role_with_description_prompt}
+ ===== ANSWER TEMPLATE =====
+ - Characterization and comparison of $A$ and $B$:\n<BLANK>
+ - Historical & Empirical Analysis:\n<BLANK>/None
+ - Logical Deduction of Conditions ($C$) (multiple conditions can be deduced):
+ condition <NUM>:
+ <BLANK>.
+ - Entity/Label Recognition of Conditions:\n[<BLANK>, <BLANK>, ...] (include square brackets)
+ - Quality Assessment ($Q$) (do not use symbols):
+ <BLANK>.
+ - Iterative Evaluation:\n<BLANK>/None"""
+
+         if role_descriptions_dict is not None:
+             role_names = role_descriptions_dict.keys()
+             role_with_description_prompt = (
+                 "===== ROLES WITH DESCRIPTIONS =====\n"
+                 + "\n".join(
+                     f"{role_name}:\n{role_descriptions_dict[role_name]}\n"
+                     for role_name in role_names
+                 )
+                 + "\n\n"
+             )
+         else:
+             role_with_description_prompt = ""
+         deduce_prompt = TextPrompt(deduce_prompt)
+
+         deduce = deduce_prompt.format(
+             starting_state=starting_state,
+             target_state=target_state,
+             role_with_description_prompt=role_with_description_prompt,
+         )
+
+         conditions_and_quality_generation_msg = BaseMessage.make_user_message(
+             role_name="Deductive Reasoner", content=deduce
+         )
+
+         response = self.step(
+             input_message=conditions_and_quality_generation_msg
+         )
+
+         if response.terminated:
+             raise RuntimeError(
+                 "Deduction failed. Error:\n" + f"{response.info}"
+             )
+         msg: BaseMessage = response.msg
+         print(f"Message content:\n{msg.content}")
+
+         # Extract the conditions from the message
+         condistions_dict = {
+             f"condition {i}": cdt.replace("<", "")
+             .replace(">", "")
+             .strip()
+             .strip('\n')
+             for i, cdt in re.findall(
+                 r"condition (\d+):\s*(.+?)(?=condition \d+|- Entity)",
+                 msg.content,
+                 re.DOTALL,
+             )
+         }
+
+         # Extract the labels from the message
+         labels = [
+             label.strip().strip('\n').strip("\"'")
+             for label in re.findall(
+                 r"Entity/Label Recognition of Conditions:\n\[(.+?)\]",
+                 msg.content,
+                 re.DOTALL,
+             )[0].split(",")
+         ]
+
+         # Extract the quality from the message
+         quality = next(
+             q.strip().strip('\n')
+             for q in re.findall(
+                 r"Quality Assessment \(\$Q\$\) \(do not use symbols\):"
+                 r"\n(.+?)- Iterative",
+                 msg.content,
+                 re.DOTALL,
+             )
+         )
+
+         # Convert them into JSON format
+         conditions_and_quality_json: Dict[
+             str, Union[List[str], Dict[str, str]]
+         ] = {}
+         conditions_and_quality_json["conditions"] = condistions_dict
+         conditions_and_quality_json["labels"] = labels
+         conditions_and_quality_json["evaluate_quality"] = quality
+
+         return conditions_and_quality_json
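Usage sketch (editor's note, not part of the diff): the new DeductiveReasonerAgent could be exercised roughly as below. The example states are hypothetical, OpenAI credentials are assumed to be configured for the underlying ChatAgent, and the printed keys match the dictionary assembled at the end of deduce_conditions_and_quality.

from camel.agents.deductive_reasoner_agent import DeductiveReasonerAgent
from camel.types import ModelType

# Hypothetical starting/target states; any pair of natural-language states works.
reasoner = DeductiveReasonerAgent(model_type=ModelType.GPT_4)
result = reasoner.deduce_conditions_and_quality(
    starting_state="The snake is at the maze entrance.",
    target_state="The snake has reached the apple without hitting a wall.",
)
print(result["conditions"])        # {"condition 1": "...", ...}
print(result["labels"])            # ["...", "..."]
print(result["evaluate_quality"])  # quality assessment text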
camel/agents/embodied_agent.py
@@ -11,15 +11,21 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
- from typing import Any, Dict, List, Optional
+ from typing import Any, List, Optional

  from colorama import Fore

- from camel.agents import BaseToolAgent, ChatAgent, HuggingFaceToolAgent
+ from camel.agents.chat_agent import ChatAgent
+ from camel.agents.tool_agents.base import BaseToolAgent
+ from camel.interpreters import (
+     BaseInterpreter,
+     InternalPythonInterpreter,
+     SubprocessInterpreter,
+ )
  from camel.messages import BaseMessage
  from camel.responses import ChatAgentResponse
  from camel.types import ModelType
- from camel.utils import PythonInterpreter, print_text_animated
+ from camel.utils import print_text_animated


  class EmbodiedAgent(ChatAgent):
@@ -34,8 +40,13 @@ class EmbodiedAgent(ChatAgent):
          message_window_size (int, optional): The maximum number of previous
              messages to include in the context window. If `None`, no windowing
              is performed. (default: :obj:`None`)
-         action_space (List[Any], optional): The action space for the embodied
-             agent. (default: :obj:`None`)
+         tool_agents (List[BaseToolAgent], optional): The tools agents to use in
+             the embodied agent. (default: :obj:`None`)
+         code_interpreter (BaseInterpreter, optional): The code interpreter to
+             execute codes. If `code_interpreter` and `tool_agent` are both
+             `None`, default to `SubProcessInterpreter`. If `code_interpreter`
+             is `None` and `tool_agents` is not `None`, default to
+             `InternalPythonInterpreter`. (default: :obj:`None`)
          verbose (bool, optional): Whether to print the critic's messages.
          logger_color (Any): The color of the logger displayed to the user.
              (default: :obj:`Fore.MAGENTA`)
@@ -47,18 +58,22 @@ class EmbodiedAgent(ChatAgent):
          model_type: ModelType = ModelType.GPT_4,
          model_config: Optional[Any] = None,
          message_window_size: Optional[int] = None,
-         action_space: Optional[List[BaseToolAgent]] = None,
+         tool_agents: Optional[List[BaseToolAgent]] = None,
+         code_interpreter: Optional[BaseInterpreter] = None,
          verbose: bool = False,
          logger_color: Any = Fore.MAGENTA,
      ) -> None:
-         default_action_space = [
-             HuggingFaceToolAgent('hugging_face_tool_agent',
-                                  model_type=model_type.value),
-         ]
-         self.action_space = action_space or default_action_space
-         action_space_prompt = self.get_action_space_prompt()
-         system_message.content = system_message.content.format(
-             action_space=action_space_prompt)
+         self.tool_agents = tool_agents
+         self.code_interpreter: BaseInterpreter
+         if code_interpreter is not None:
+             self.code_interpreter = code_interpreter
+         elif self.tool_agents:
+             self.code_interpreter = InternalPythonInterpreter()
+         else:
+             self.code_interpreter = SubprocessInterpreter()
+
+         if self.tool_agents:
+             system_message = self._set_tool_agents(system_message)
          self.verbose = verbose
          self.logger_color = logger_color
          super().__init__(
@@ -68,16 +83,45 @@ class EmbodiedAgent(ChatAgent):
              message_window_size=message_window_size,
          )

-     def get_action_space_prompt(self) -> str:
+     def _set_tool_agents(self, system_message: BaseMessage) -> BaseMessage:
+         action_space_prompt = self._get_tool_agents_prompt()
+         result_message = system_message.create_new_instance(
+             content=system_message.content.format(
+                 action_space=action_space_prompt
+             )
+         )
+         if self.tool_agents is not None:
+             self.code_interpreter.update_action_space(
+                 {tool.name: tool for tool in self.tool_agents}
+             )
+         return result_message
+
+     def _get_tool_agents_prompt(self) -> str:
          r"""Returns the action space prompt.

          Returns:
              str: The action space prompt.
          """
-         return "\n".join([
-             f"*** {action.name} ***:\n {action.description}"
-             for action in self.action_space
-         ])
+         if self.tool_agents is not None:
+             return "\n".join(
+                 [
+                     f"*** {tool.name} ***:\n {tool.description}"
+                     for tool in self.tool_agents
+                 ]
+             )
+         else:
+             return ""
+
+     def get_tool_agent_names(self) -> List[str]:
+         r"""Returns the names of tool agents.
+
+         Returns:
+             List[str]: The names of tool agents.
+         """
+         if self.tool_agents is not None:
+             return [tool.name for tool in self.tool_agents]
+         else:
+             return []

      def step(
          self,
@@ -105,34 +149,42 @@ class EmbodiedAgent(ChatAgent):

          if self.verbose:
              for explanation, code in zip(explanations, codes):
-                 print_text_animated(self.logger_color +
-                                     f"> Explanation:\n{explanation}")
+                 print_text_animated(
+                     self.logger_color + f"> Explanation:\n{explanation}"
+                 )
                  print_text_animated(self.logger_color + f"> Code:\n{code}")

          if len(explanations) > len(codes):
-             print_text_animated(self.logger_color +
-                                 f"> Explanation:\n{explanations}")
+             print_text_animated(
+                 self.logger_color + f"> Explanation:\n{explanations[-1]}"
+             )

          content = response.msg.content

          if codes is not None:
-             content = "\n> Executed Results:"
-             action_space: Dict[str, Any] = {
-                 action.name: action
-                 for action in self.action_space
-             }
-             action_space.update({"print": print, "enumerate": enumerate})
-             interpreter = PythonInterpreter(action_space=action_space)
-             for block_idx, code in enumerate(codes):
-                 executed_outputs, _ = code.execute(interpreter)
-                 content += (f"Executing code block {block_idx}:\n"
-                             f" - execution output:\n{executed_outputs}\n"
-                             f" - Local variables:\n{interpreter.state}\n")
-                 content += "*" * 50 + "\n"
+             try:
+                 content = "\n> Executed Results:\n"
+                 for block_idx, code in enumerate(codes):
+                     executed_output = self.code_interpreter.run(
+                         code, code.code_type
+                     )
+                     content += (
+                         f"Executing code block {block_idx}: {{\n"
+                         + executed_output
+                         + "}\n"
+                     )
+             except InterruptedError as e:
+                 content = (
+                     f"\n> Running code fail: {e}\n"
+                     "Please regenerate the code."
+                 )

          # TODO: Handle errors
-         content = input_message.content + (Fore.RESET +
-                                            f"\n> Embodied Actions:\n{content}")
+         content = input_message.content + f"\n> Embodied Actions:\n{content}"
-         message = BaseMessage(input_message.role_name, input_message.role_type,
-                               input_message.meta_dict, content)
+         message = BaseMessage(
+             input_message.role_name,
+             input_message.role_type,
+             input_message.meta_dict,
+             content,
+         )
          return ChatAgentResponse([message], response.terminated, response.info)
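Editor's note on the new interpreter selection (not part of the diff): the docstring above specifies a fallback order that the constructor now implements. Below is a minimal sketch under stated assumptions: the system message is hypothetical, OpenAI credentials are assumed to be configured, and the first positional argument of EmbodiedAgent is taken to be the system message as in the rest of the agents.

from camel.agents.embodied_agent import EmbodiedAgent
from camel.interpreters import InternalPythonInterpreter
from camel.messages import BaseMessage
from camel.types import RoleType

# Hypothetical system message; when tool_agents are supplied, its content is
# expected to contain an "{action_space}" placeholder (see _set_tool_agents).
sys_msg = BaseMessage(
    role_name="Coder",
    role_type=RoleType.ASSISTANT,
    meta_dict=None,
    content="You write and run Python code to accomplish tasks.",
)

# No tool_agents and no code_interpreter -> SubprocessInterpreter is used.
agent = EmbodiedAgent(sys_msg)

# An explicitly supplied interpreter always takes precedence over the defaults.
sandboxed_agent = EmbodiedAgent(
    sys_msg, code_interpreter=InternalPythonInterpreter()
)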
camel/agents/role_assignment_agent.py
@@ -14,7 +14,7 @@
  import re
  from typing import Any, Dict, Optional, Union

- from camel.agents import ChatAgent
+ from camel.agents.chat_agent import ChatAgent
  from camel.messages import BaseMessage
  from camel.prompts import TextPrompt
  from camel.types import ModelType, RoleType
@@ -68,21 +68,27 @@ class RoleAssignmentAgent(ChatAgent):
          expert_prompt = "===== ANSWER PROMPT =====\n" + "\n".join(
              f"Domain expert {i + 1}: <BLANK>\n"
              f"Associated competencies, characteristics, duties "
-             f"and workflows: <BLANK>. End." for i in range(num_roles or 0))
+             f"and workflows: <BLANK>. End."
+             for i in range(num_roles or 0)
+         )
          role_assignment_generation_prompt = TextPrompt(
-             "You are a role assignment agent, and you're in charge of " +
-             "recruiting {num_roles} experts for the following task." +
-             "\n==== TASK =====\n {task}\n\n" +
-             "Identify the domain experts you'd recruit and detail their " +
-             "associated competencies, characteristics, duties and workflows " +
-             "to complete the task.\n " +
-             "Your answer MUST adhere to the format of ANSWER PROMPT, and " +
-             "ONLY answer the BLANKs.\n" + expert_prompt)
+             "You are a role assignment agent, and you're in charge of "
+             + "recruiting {num_roles} experts for the following task."
+             + "\n==== TASK =====\n {task}\n\n"
+             + "Identify the domain experts you'd recruit and detail their "
+             + "associated competencies, characteristics, duties and workflows "
+             + "to complete the task.\n "
+             + "Your answer MUST adhere to the format of ANSWER PROMPT, and "
+             + "ONLY answer the BLANKs.\n"
+             + expert_prompt
+         )
          role_assignment_generation = role_assignment_generation_prompt.format(
-             num_roles=num_roles, task=task_prompt)
+             num_roles=num_roles, task=task_prompt
+         )

          role_assignment_generation_msg = BaseMessage.make_user_message(
-             role_name="Role Assigner", content=role_assignment_generation)
+             role_name="Role Assigner", content=role_assignment_generation
+         )

          response = self.step(input_message=role_assignment_generation_msg)

@@ -91,21 +97,25 @@ class RoleAssignmentAgent(ChatAgent):

          # Distribute the output completions into role names and descriptions
          role_names = [
-             desc.replace("<|", "").replace("|>", "") for desc in re.findall(
+             desc.replace("<|", "").replace("|>", "")
+             for desc in re.findall(
                  r"Domain expert \d: (.+?)\nAssociated competencies,",
                  msg.content,
                  re.DOTALL,
              )
          ]
          role_descriptions = [
-             desc.replace("<|", "").replace("|>", "") for desc in re.findall(
+             desc.replace("<|", "").replace("|>", "")
+             for desc in re.findall(
                  r"Associated competencies, characteristics, "
-                 r"duties and workflows: (.+?) End.", msg.content, re.DOTALL)
+                 r"duties and workflows: (.+?) End.",
+                 msg.content,
+                 re.DOTALL,
+             )
          ]

          if len(role_names) != num_roles or len(role_descriptions) != num_roles:
-             raise RuntimeError(
-                 "Got None or insufficient information of roles.")
+             raise RuntimeError("Got None or insufficient information of roles.")
          if terminated:
              raise RuntimeError("Role assignment failed.")
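Editor's note (not part of the diff): the extraction logic reformatted above is easiest to follow on a concrete completion. The sketch below runs the same two regular expressions against a hypothetical reply that follows the ANSWER PROMPT template; the reply text is invented for illustration only.

import re

# Hypothetical completion following the "ANSWER PROMPT" template built above.
reply = (
    "Domain expert 1: Game Developer\n"
    "Associated competencies, characteristics, duties "
    "and workflows: Designs the snake game loop. End.\n"
    "Domain expert 2: QA Engineer\n"
    "Associated competencies, characteristics, duties "
    "and workflows: Writes tests for collision handling. End."
)

# Same regex and "<|"/"|>" cleanup as in the diff above.
role_names = [
    name.replace("<|", "").replace("|>", "")
    for name in re.findall(
        r"Domain expert \d: (.+?)\nAssociated competencies,",
        reply,
        re.DOTALL,
    )
]
role_descriptions = re.findall(
    r"Associated competencies, characteristics, "
    r"duties and workflows: (.+?) End.",
    reply,
    re.DOTALL,
)
print(role_names)         # ['Game Developer', 'QA Engineer']
print(role_descriptions)  # ['Designs the snake game loop.', 'Writes tests for collision handling.']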