camel-ai 0.1.1__py3-none-any.whl → 0.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic. Click here for more details.

Files changed (117) hide show
  1. camel/__init__.py +1 -11
  2. camel/agents/__init__.py +7 -5
  3. camel/agents/chat_agent.py +134 -86
  4. camel/agents/critic_agent.py +28 -17
  5. camel/agents/deductive_reasoner_agent.py +235 -0
  6. camel/agents/embodied_agent.py +92 -40
  7. camel/agents/knowledge_graph_agent.py +221 -0
  8. camel/agents/role_assignment_agent.py +27 -17
  9. camel/agents/task_agent.py +60 -34
  10. camel/agents/tool_agents/base.py +0 -1
  11. camel/agents/tool_agents/hugging_face_tool_agent.py +7 -4
  12. camel/configs/__init__.py +29 -0
  13. camel/configs/anthropic_config.py +73 -0
  14. camel/configs/base_config.py +22 -0
  15. camel/{configs.py → configs/openai_config.py} +37 -64
  16. camel/embeddings/__init__.py +2 -0
  17. camel/embeddings/base.py +3 -2
  18. camel/embeddings/openai_embedding.py +10 -5
  19. camel/embeddings/sentence_transformers_embeddings.py +65 -0
  20. camel/functions/__init__.py +18 -3
  21. camel/functions/google_maps_function.py +335 -0
  22. camel/functions/math_functions.py +7 -7
  23. camel/functions/open_api_function.py +380 -0
  24. camel/functions/open_api_specs/coursera/__init__.py +13 -0
  25. camel/functions/open_api_specs/coursera/openapi.yaml +82 -0
  26. camel/functions/open_api_specs/klarna/__init__.py +13 -0
  27. camel/functions/open_api_specs/klarna/openapi.yaml +87 -0
  28. camel/functions/open_api_specs/speak/__init__.py +13 -0
  29. camel/functions/open_api_specs/speak/openapi.yaml +151 -0
  30. camel/functions/openai_function.py +346 -42
  31. camel/functions/retrieval_functions.py +61 -0
  32. camel/functions/search_functions.py +100 -35
  33. camel/functions/slack_functions.py +275 -0
  34. camel/functions/twitter_function.py +484 -0
  35. camel/functions/weather_functions.py +36 -23
  36. camel/generators.py +65 -46
  37. camel/human.py +17 -11
  38. camel/interpreters/__init__.py +25 -0
  39. camel/interpreters/base.py +49 -0
  40. camel/{utils/python_interpreter.py → interpreters/internal_python_interpreter.py} +129 -48
  41. camel/interpreters/interpreter_error.py +19 -0
  42. camel/interpreters/subprocess_interpreter.py +190 -0
  43. camel/loaders/__init__.py +22 -0
  44. camel/{functions/base_io_functions.py → loaders/base_io.py} +38 -35
  45. camel/{functions/unstructured_io_fuctions.py → loaders/unstructured_io.py} +199 -110
  46. camel/memories/__init__.py +17 -7
  47. camel/memories/agent_memories.py +156 -0
  48. camel/memories/base.py +97 -32
  49. camel/memories/blocks/__init__.py +21 -0
  50. camel/memories/{chat_history_memory.py → blocks/chat_history_block.py} +34 -34
  51. camel/memories/blocks/vectordb_block.py +101 -0
  52. camel/memories/context_creators/__init__.py +3 -2
  53. camel/memories/context_creators/score_based.py +32 -20
  54. camel/memories/records.py +6 -5
  55. camel/messages/__init__.py +2 -2
  56. camel/messages/base.py +99 -16
  57. camel/messages/func_message.py +7 -4
  58. camel/models/__init__.py +6 -2
  59. camel/models/anthropic_model.py +146 -0
  60. camel/models/base_model.py +10 -3
  61. camel/models/model_factory.py +17 -11
  62. camel/models/open_source_model.py +25 -13
  63. camel/models/openai_audio_models.py +251 -0
  64. camel/models/openai_model.py +20 -13
  65. camel/models/stub_model.py +10 -5
  66. camel/prompts/__init__.py +7 -5
  67. camel/prompts/ai_society.py +21 -14
  68. camel/prompts/base.py +54 -47
  69. camel/prompts/code.py +22 -14
  70. camel/prompts/evaluation.py +8 -5
  71. camel/prompts/misalignment.py +26 -19
  72. camel/prompts/object_recognition.py +35 -0
  73. camel/prompts/prompt_templates.py +14 -8
  74. camel/prompts/role_description_prompt_template.py +16 -10
  75. camel/prompts/solution_extraction.py +9 -5
  76. camel/prompts/task_prompt_template.py +24 -21
  77. camel/prompts/translation.py +9 -5
  78. camel/responses/agent_responses.py +5 -2
  79. camel/retrievers/__init__.py +26 -0
  80. camel/retrievers/auto_retriever.py +330 -0
  81. camel/retrievers/base.py +69 -0
  82. camel/retrievers/bm25_retriever.py +140 -0
  83. camel/retrievers/cohere_rerank_retriever.py +108 -0
  84. camel/retrievers/vector_retriever.py +183 -0
  85. camel/societies/__init__.py +1 -1
  86. camel/societies/babyagi_playing.py +56 -32
  87. camel/societies/role_playing.py +188 -133
  88. camel/storages/__init__.py +18 -0
  89. camel/storages/graph_storages/__init__.py +23 -0
  90. camel/storages/graph_storages/base.py +82 -0
  91. camel/storages/graph_storages/graph_element.py +74 -0
  92. camel/storages/graph_storages/neo4j_graph.py +582 -0
  93. camel/storages/key_value_storages/base.py +1 -2
  94. camel/storages/key_value_storages/in_memory.py +1 -2
  95. camel/storages/key_value_storages/json.py +8 -13
  96. camel/storages/vectordb_storages/__init__.py +33 -0
  97. camel/storages/vectordb_storages/base.py +202 -0
  98. camel/storages/vectordb_storages/milvus.py +396 -0
  99. camel/storages/vectordb_storages/qdrant.py +373 -0
  100. camel/terminators/__init__.py +1 -1
  101. camel/terminators/base.py +2 -3
  102. camel/terminators/response_terminator.py +21 -12
  103. camel/terminators/token_limit_terminator.py +5 -3
  104. camel/toolkits/__init__.py +21 -0
  105. camel/toolkits/base.py +22 -0
  106. camel/toolkits/github_toolkit.py +245 -0
  107. camel/types/__init__.py +18 -6
  108. camel/types/enums.py +129 -15
  109. camel/types/openai_types.py +10 -5
  110. camel/utils/__init__.py +20 -13
  111. camel/utils/commons.py +170 -85
  112. camel/utils/token_counting.py +135 -15
  113. {camel_ai-0.1.1.dist-info → camel_ai-0.1.4.dist-info}/METADATA +123 -75
  114. camel_ai-0.1.4.dist-info/RECORD +119 -0
  115. {camel_ai-0.1.1.dist-info → camel_ai-0.1.4.dist-info}/WHEEL +1 -1
  116. camel/memories/context_creators/base.py +0 -72
  117. camel_ai-0.1.1.dist-info/RECORD +0 -75
@@ -0,0 +1,235 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ import re
15
+ from typing import Dict, List, Optional, Union
16
+
17
+ from camel.agents.chat_agent import ChatAgent
18
+ from camel.configs import BaseConfig
19
+ from camel.messages import BaseMessage
20
+ from camel.prompts import TextPrompt
21
+ from camel.types import ModelType, RoleType
22
+
23
+
24
class DeductiveReasonerAgent(ChatAgent):
    r"""An agent responsible for deductive reasoning. Model of deductive
    reasoning:
        - L: A ⊕ C -> q * B
        - A represents the known starting state.
        - B represents the known target state.
        - C represents the conditions required to transition from A to B.
        - Q represents the quality or effectiveness of the transition from
          A to B.
        - L represents the path or process from A to B.

    Args:
        model_type (ModelType, optional): The type of model to use for the
            agent. (default: :obj:`None`)
        model_config (BaseConfig, optional): The configuration for the model.
            (default: :obj:`None`)
    """

    def __init__(
        self,
        model_type: Optional[ModelType] = None,
        model_config: Optional[BaseConfig] = None,
    ) -> None:
        system_message = BaseMessage(
            role_name="Insight Agent",
            role_type=RoleType.ASSISTANT,
            meta_dict=None,
            content="You assign roles based on tasks.",
        )
        super().__init__(system_message, model_type, model_config)

    def deduce_conditions_and_quality(
        self,
        starting_state: str,
        target_state: str,
        role_descriptions_dict: Optional[Dict[str, str]] = None,
    ) -> Dict[str, Union[List[str], Dict[str, str]]]:
        r"""Derives the conditions and quality from the starting state and the
        target state based on the model of the deductive reasoning and the
        knowledge base. It can optionally consider the roles involved in the
        scenario, which allows tailoring the output more closely to the AI
        agent's environment.

        Args:
            starting_state (str): The initial or starting state from which
                conditions are deduced.
            target_state (str): The target state of the task.
            role_descriptions_dict (Optional[Dict[str, str]], optional): A
                dictionary describing the roles involved in the scenario. This
                is optional and can be used to provide a context for the
                CAMEL's role-playing, enabling the generation of more relevant
                and tailored conditions and quality assessments. This could be
                generated using a `RoleAssignmentAgent()` or defined manually
                by the user. (default: :obj:`None`)

        Returns:
            Dict[str, Union[List[str], Dict[str, str]]]: A dictionary with the
                extracted data from the message. The dictionary contains three
                keys:
                - 'conditions': A dictionary where each key is a condition ID
                  and each value is the corresponding condition text.
                - 'labels': A list of label strings extracted from the message.
                - 'evaluate_quality': A string of quality assessment extracted
                  from the message.

        Raises:
            RuntimeError: If the underlying chat step terminates abnormally.
        """
        # Clear any previous conversation state so each deduction is
        # independent of earlier calls.
        self.reset()

        deduce_prompt = """You are a deductive reasoner. You are tasked to complete the TASK based on the THOUGHT OF DEDUCTIVE REASONING, the STARTING STATE A and the TARGET STATE B. You are given the CONTEXT CONTENT to help you complete the TASK.
Your answer MUST strictly adhere to the structure of ANSWER TEMPLATE, ONLY fill in the BLANKs, and DO NOT alter or modify any other part of the template

===== MODELING OF DEDUCTIVE REASONING =====
You are tasked with understanding a mathematical model based on the components ${A, B, C, Q, L}$. In this model: ``L: A ⊕ C -> q * B``.
- $A$ represents the known starting state.
- $B$ represents the known target state.
- $C$ represents the conditions required to transition from $A$ to $B$.
- $Q$ represents the quality or effectiveness of the transition from $A$ to $B$.
- $L$ represents the path or process from $A$ to $B$.

===== THOUGHT OF DEDUCTIVE REASONING =====
1. Define the Parameters of A and B:
- Characterization: Before delving into transitions, thoroughly understand the nature and boundaries of both $A$ and $B$. This includes the type, properties, constraints, and possible interactions between the two.
- Contrast and Compare: Highlight the similarities and differences between $A$ and $B$. This comparative analysis will give an insight into what needs changing and what remains constant.
2. Historical & Empirical Analysis:
- Previous Transitions according to the Knowledge Base of GPT: (if applicable) Extract conditions and patterns from the historical instances where a similar transition from a state comparable to $A$ moved towards $B$.
- Scientific Principles: (if applicable) Consider the underlying scientific principles governing or related to the states and their transition. For example, if $A$ and $B$ are physical states, laws of physics might apply.
3. Logical Deduction of Conditions ($C$):
- Direct Path Analysis: What are the immediate and direct conditions required to move from $A$ to $B$?
- Intermediate States: Are there states between $A$ and $B$ that must be transversed or can be used to make the transition smoother or more efficient? If yes, what is the content?
- Constraints & Limitations: Identify potential barriers or restrictions in moving from $A$ to $B$. These can be external (e.g., environmental factors) or internal (properties of $A$ or $B$).
- Resource and Information Analysis: What resources and information are required for the transition? This could be time, entity, factor, code language, software platform, unknowns, etc.
- External Influences: Consider socio-economic, political, or environmental factors (if applicable) that could influence the transition conditions.
- Creative/Heuristic Reasoning: Open your mind to multiple possible $C$'s, no matter how unconventional they might seem. Utilize analogies, metaphors, or brainstorming techniques to envision possible conditions or paths from $A$ to $B$.
- The conditions $C$ should be multiple but in one sentence. And each condition should be concerned with one aspect/entity.
4. Entity/Label Recognition of Conditions ($C$):
- Identify and categorize entities of Conditions ($C$) such as the names, locations, dates, specific technical terms or contextual parameters that might be associated with events, innovations post-2022.
- The output of the entities/labels will be used as tags or labels for semantic similarity searches. The entities/labels may be the words, or phrases, each of them should contain valuable, high information entropy information, and should be independent.
- Ensure that the identified entities are formatted in a manner suitable for database indexing and retrieval. Organize the entities into categories, and combine the category with its instance into a continuous phrase, without using colons or other separators.
- Format these entities for database indexing: output the category rather than its instance/content into a continuous phrase. For example, instead of "Jan. 02", identify it as "Event time".
5. Quality Assessment ($Q$):
- Efficiency: How efficient is the transition from $A$ to $B$, which measures the resources used versus the desired outcome?
- Effectiveness: Did the transition achieve the desired outcome or was the target state achieved as intended?
- Safety & Risks: Assess any risks associated with the transition and the measures to mitigate them.
- Feedback Mechanisms: Incorporate feedback loops to continuously monitor and adjust the quality of transition, making it more adaptive.
6. Iterative Evaluation:
- Test & Refine: Based on the initially deduced conditions and assessed quality, iterate the process to refine and optimize the transition. This might involve tweaking conditions, employing different paths, or changing resources.
- Feedback Integration: Use feedback to make improvements and increase the quality of the transition.
7. Real-world scenarios often present challenges that may not be captured by models and frameworks. While using the model, maintain an adaptive mindset:
- Scenario Exploration: Continuously imagine various possible scenarios, both positive and negative, to prepare for unexpected events.
- Flexibility: Be prepared to modify conditions ($C$) or alter the path/process ($L$) if unforeseen challenges arise.
- Feedback Integration: Rapidly integrate feedback from actual implementations to adjust the model's application, ensuring relevancy and effectiveness.

===== TASK =====
Given the starting state $A$ and the target state $B$, assuming that a path $L$ always exists between $A$ and $B$, how can one deduce or identify the necessary conditions $C$ and the quality $Q$ of the transition?

===== STARTING STATE $A$ =====
{starting_state}

===== TARGET STATE $B$ =====
{target_state}

{role_with_description_prompt}
===== ANSWER TEMPLATE =====
- Characterization and comparison of $A$ and $B$:\n<BLANK>
- Historical & Empirical Analysis:\n<BLANK>/None
- Logical Deduction of Conditions ($C$) (multiple conditions can be deduced):
    condition <NUM>:
        <BLANK>.
- Entity/Label Recognition of Conditions:\n[<BLANK>, <BLANK>, ...] (include square brackets)
- Quality Assessment ($Q$) (do not use symbols):
    <BLANK>.
- Iterative Evaluation:\n<BLANK>/None"""

        # Only include the roles section when role descriptions were given;
        # otherwise the placeholder is filled with an empty string.
        if role_descriptions_dict is not None:
            role_names = role_descriptions_dict.keys()
            role_with_description_prompt = (
                "===== ROLES WITH DESCRIPTIONS =====\n"
                + "\n".join(
                    f"{role_name}:\n{role_descriptions_dict[role_name]}\n"
                    for role_name in role_names
                )
                + "\n\n"
            )
        else:
            role_with_description_prompt = ""
        deduce_prompt = TextPrompt(deduce_prompt)

        deduce = deduce_prompt.format(
            starting_state=starting_state,
            target_state=target_state,
            role_with_description_prompt=role_with_description_prompt,
        )

        conditions_and_quality_generation_msg = BaseMessage.make_user_message(
            role_name="Deductive Reasoner", content=deduce
        )

        response = self.step(
            input_message=conditions_and_quality_generation_msg
        )

        if response.terminated:
            raise RuntimeError(
                "Deduction failed. Error:\n" + f"{response.info}"
            )
        msg: BaseMessage = response.msg
        # Debug output of the raw model answer before parsing.
        print(f"Message content:\n{msg.content}")

        # Extract the conditions from the message: every "condition <n>:"
        # entry up to the next condition or the "- Entity" section header.
        conditions_dict = {
            f"condition {i}": cdt.replace("<", "")
            .replace(">", "")
            .strip()
            .strip('\n')
            for i, cdt in re.findall(
                r"condition (\d+):\s*(.+?)(?=condition \d+|- Entity)",
                msg.content,
                re.DOTALL,
            )
        }

        # Extract the labels from the bracketed list in the message.
        labels = [
            label.strip().strip('\n').strip("\"'")
            for label in re.findall(
                r"Entity/Label Recognition of Conditions:\n\[(.+?)\]",
                msg.content,
                re.DOTALL,
            )[0].split(",")
        ]

        # Extract the quality assessment text between the quality header
        # and the "- Iterative" section.
        quality = next(
            q.strip().strip('\n')
            for q in re.findall(
                r"Quality Assessment \(\$Q\$\) \(do not use symbols\):"
                r"\n(.+?)- Iterative",
                msg.content,
                re.DOTALL,
            )
        )

        # Assemble the parsed pieces into the documented result mapping.
        conditions_and_quality_json: Dict[
            str, Union[List[str], Dict[str, str]]
        ] = {}
        conditions_and_quality_json["conditions"] = conditions_dict
        conditions_and_quality_json["labels"] = labels
        conditions_and_quality_json["evaluate_quality"] = quality

        return conditions_and_quality_json
@@ -11,15 +11,21 @@
11
11
  # See the License for the specific language governing permissions and
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
- from typing import Any, Dict, List, Optional
14
+ from typing import Any, List, Optional
15
15
 
16
16
  from colorama import Fore
17
17
 
18
- from camel.agents import BaseToolAgent, ChatAgent, HuggingFaceToolAgent
18
+ from camel.agents.chat_agent import ChatAgent
19
+ from camel.agents.tool_agents.base import BaseToolAgent
20
+ from camel.interpreters import (
21
+ BaseInterpreter,
22
+ InternalPythonInterpreter,
23
+ SubprocessInterpreter,
24
+ )
19
25
  from camel.messages import BaseMessage
20
26
  from camel.responses import ChatAgentResponse
21
27
  from camel.types import ModelType
22
- from camel.utils import PythonInterpreter, print_text_animated
28
+ from camel.utils import print_text_animated
23
29
 
24
30
 
25
31
  class EmbodiedAgent(ChatAgent):
@@ -34,8 +40,13 @@ class EmbodiedAgent(ChatAgent):
34
40
  message_window_size (int, optional): The maximum number of previous
35
41
  messages to include in the context window. If `None`, no windowing
36
42
  is performed. (default: :obj:`None`)
37
- action_space (List[Any], optional): The action space for the embodied
38
- agent. (default: :obj:`None`)
43
+ tool_agents (List[BaseToolAgent], optional): The tools agents to use in
44
+ the embodied agent. (default: :obj:`None`)
45
+ code_interpreter (BaseInterpreter, optional): The code interpreter to
46
+ execute codes. If `code_interpreter` and `tool_agent` are both
47
+ `None`, default to `SubProcessInterpreter`. If `code_interpreter`
48
+ is `None` and `tool_agents` is not `None`, default to
49
+ `InternalPythonInterpreter`. (default: :obj:`None`)
39
50
  verbose (bool, optional): Whether to print the critic's messages.
40
51
  logger_color (Any): The color of the logger displayed to the user.
41
52
  (default: :obj:`Fore.MAGENTA`)
@@ -47,18 +58,22 @@ class EmbodiedAgent(ChatAgent):
47
58
  model_type: ModelType = ModelType.GPT_4,
48
59
  model_config: Optional[Any] = None,
49
60
  message_window_size: Optional[int] = None,
50
- action_space: Optional[List[BaseToolAgent]] = None,
61
+ tool_agents: Optional[List[BaseToolAgent]] = None,
62
+ code_interpreter: Optional[BaseInterpreter] = None,
51
63
  verbose: bool = False,
52
64
  logger_color: Any = Fore.MAGENTA,
53
65
  ) -> None:
54
- default_action_space = [
55
- HuggingFaceToolAgent('hugging_face_tool_agent',
56
- model_type=model_type.value),
57
- ]
58
- self.action_space = action_space or default_action_space
59
- action_space_prompt = self.get_action_space_prompt()
60
- system_message.content = system_message.content.format(
61
- action_space=action_space_prompt)
66
+ self.tool_agents = tool_agents
67
+ self.code_interpreter: BaseInterpreter
68
+ if code_interpreter is not None:
69
+ self.code_interpreter = code_interpreter
70
+ elif self.tool_agents:
71
+ self.code_interpreter = InternalPythonInterpreter()
72
+ else:
73
+ self.code_interpreter = SubprocessInterpreter()
74
+
75
+ if self.tool_agents:
76
+ system_message = self._set_tool_agents(system_message)
62
77
  self.verbose = verbose
63
78
  self.logger_color = logger_color
64
79
  super().__init__(
@@ -68,16 +83,45 @@ class EmbodiedAgent(ChatAgent):
68
83
  message_window_size=message_window_size,
69
84
  )
70
85
 
71
- def get_action_space_prompt(self) -> str:
86
def _set_tool_agents(self, system_message: BaseMessage) -> BaseMessage:
    r"""Fill the ``{action_space}`` placeholder of the system message with
    the tool-agent prompt and register the tool agents with the code
    interpreter.

    Args:
        system_message (BaseMessage): The system message whose content is
            formatted with the action space description.

    Returns:
        BaseMessage: A new message instance with the placeholder filled.
    """
    prompt = self._get_tool_agents_prompt()
    filled_content = system_message.content.format(action_space=prompt)
    new_message = system_message.create_new_instance(content=filled_content)
    if self.tool_agents is not None:
        name_to_tool = {agent.name: agent for agent in self.tool_agents}
        self.code_interpreter.update_action_space(name_to_tool)
    return new_message
98
+
99
def _get_tool_agents_prompt(self) -> str:
    r"""Returns the action space prompt.

    Returns:
        str: The action space prompt, one ``*** name ***`` entry per tool
            agent, or an empty string when no tool agents are configured.
    """
    if self.tool_agents is None:
        return ""
    entries = [
        f"*** {agent.name} ***:\n {agent.description}"
        for agent in self.tool_agents
    ]
    return "\n".join(entries)
114
+
115
def get_tool_agent_names(self) -> List[str]:
    r"""Returns the names of tool agents.

    Returns:
        List[str]: The names of tool agents; empty when none are set.
    """
    agents = self.tool_agents
    return [agent.name for agent in agents] if agents is not None else []
81
125
 
82
126
  def step(
83
127
  self,
@@ -105,34 +149,42 @@ class EmbodiedAgent(ChatAgent):
105
149
 
106
150
  if self.verbose:
107
151
  for explanation, code in zip(explanations, codes):
108
- print_text_animated(self.logger_color +
109
- f"> Explanation:\n{explanation}")
152
+ print_text_animated(
153
+ self.logger_color + f"> Explanation:\n{explanation}"
154
+ )
110
155
  print_text_animated(self.logger_color + f"> Code:\n{code}")
111
156
 
112
157
  if len(explanations) > len(codes):
113
- print_text_animated(self.logger_color +
114
- f"> Explanation:\n{explanations}")
158
+ print_text_animated(
159
+ self.logger_color + f"> Explanation:\n{explanations[-1]}"
160
+ )
115
161
 
116
162
  content = response.msg.content
117
163
 
118
164
  if codes is not None:
119
- content = "\n> Executed Results:"
120
- action_space: Dict[str, Any] = {
121
- action.name: action
122
- for action in self.action_space
123
- }
124
- action_space.update({"print": print, "enumerate": enumerate})
125
- interpreter = PythonInterpreter(action_space=action_space)
126
- for block_idx, code in enumerate(codes):
127
- executed_outputs, _ = code.execute(interpreter)
128
- content += (f"Executing code block {block_idx}:\n"
129
- f" - execution output:\n{executed_outputs}\n"
130
- f" - Local variables:\n{interpreter.state}\n")
131
- content += "*" * 50 + "\n"
165
+ try:
166
+ content = "\n> Executed Results:\n"
167
+ for block_idx, code in enumerate(codes):
168
+ executed_output = self.code_interpreter.run(
169
+ code, code.code_type
170
+ )
171
+ content += (
172
+ f"Executing code block {block_idx}: {{\n"
173
+ + executed_output
174
+ + "}\n"
175
+ )
176
+ except InterruptedError as e:
177
+ content = (
178
+ f"\n> Running code fail: {e}\n"
179
+ "Please regenerate the code."
180
+ )
132
181
 
133
182
  # TODO: Handle errors
134
- content = input_message.content + (Fore.RESET +
135
- f"\n> Embodied Actions:\n{content}")
136
- message = BaseMessage(input_message.role_name, input_message.role_type,
137
- input_message.meta_dict, content)
183
+ content = input_message.content + f"\n> Embodied Actions:\n{content}"
184
+ message = BaseMessage(
185
+ input_message.role_name,
186
+ input_message.role_type,
187
+ input_message.meta_dict,
188
+ content,
189
+ )
138
190
  return ChatAgentResponse([message], response.terminated, response.info)
@@ -0,0 +1,221 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ from typing import Any, Optional, Union
15
+
16
+ from unstructured.documents.elements import Element
17
+
18
+ from camel.agents import ChatAgent
19
+ from camel.messages import BaseMessage
20
+ from camel.prompts import TextPrompt
21
+ from camel.storages.graph_storages.graph_element import (
22
+ GraphElement,
23
+ Node,
24
+ Relationship,
25
+ )
26
+ from camel.types import ModelType, RoleType
27
+
28
# Prompt template sent to the model by KnowledgeGraphAgent.run(): instructs
# the model to emit Node(...)/Relationship(...) lines that are later parsed
# back by regex. The exact wording (including its grammar) is runtime
# behavior and is kept verbatim; `{task}` is filled with the input element.
text_prompt = """
You are tasked with extracting nodes and relationships from given content and structures them into Node and Relationship objects. Here's the outline of what you needs to do:

Content Extraction:
You should be able to process input content and identify entities mentioned within it.
Entities can be any noun phrases or concepts that represent distinct entities in the context of the given content.

Node Extraction:
For each identified entity, you should create a Node object.
Each Node object should have a unique identifier (id) and a type (type).
Additional properties associated with the node can also be extracted and stored.

Relationship Extraction:
You should identify relationships between entities mentioned in the content.
For each relationship, create a Relationship object.
A Relationship object should have a subject (subj) and an object (obj) which are Node objects representing the entities involved in the relationship.
Each relationship should also have a type (type), and additional properties if applicable.

Output Formatting:
The extracted nodes and relationships should be formatted as instances of the provided Node and Relationship classes.
Ensure that the extracted data adheres to the structure defined by the classes.
Output the structured data in a format that can be easily validated against the provided code.

Instructions for you:
Read the provided content thoroughly.
Identify distinct entities mentioned in the content and categorize them as nodes.
Determine relationships between these entities and represent them as directed relationships.
Provide the extracted nodes and relationships in the specified format below.
Example for you:

Example Content:
"John works at XYZ Corporation. He is a software engineer. The company is located in New York City."

Expected Output:

Nodes:

Node(id='John', type='Person', properties={'agent_generated'})
Node(id='XYZ Corporation', type='Organization', properties={'agent_generated'})
Node(id='New York City', type='Location', properties={'agent_generated'})

Relationships:

Relationship(subj=Node(id='John', type='Person'), obj=Node(id='XYZ Corporation', type='Organization'), type='WorksAt', properties={'agent_generated'})
Relationship(subj=Node(id='John', type='Person'), obj=Node(id='New York City', type='Location'), type='ResidesIn', properties={'agent_generated'})

===== TASK =====
Please extracts nodes and relationships from given content and structures them into Node and Relationship objects.

{task}
"""
79
+
80
+
81
class KnowledgeGraphAgent(ChatAgent):
    r"""An agent that can extract node and relationship information for
    different entities from given `Element` content.
    """

    def __init__(
        self,
        model_type: ModelType = ModelType.GPT_3_5_TURBO,
        model_config: Optional[Any] = None,
    ) -> None:
        r"""Initialize the `KnowledgeGraphAgent`.

        Args:
            model_type (ModelType, optional): The type of model to use for the
                agent. Defaults to `ModelType.GPT_3_5_TURBO`.
            model_config (Any, optional): The configuration for the model.
                Defaults to `None`.
        """
        system_message = BaseMessage(
            role_name="Graphify",
            role_type=RoleType.ASSISTANT,
            meta_dict=None,
            # Fixed typo: "intostructured" -> "into structured".
            content="Your mission is to transform unstructured content "
            "into structured graph data. Extract nodes and relationships "
            "with precision, and let the connections unfold. Your graphs "
            "will illuminate the hidden connections within the chaos of "
            "information.",
        )
        super().__init__(system_message, model_type, model_config)

    def run(
        self,
        element: Union[str, Element],
        parse_graph_elements: bool = False,
    ) -> Union[str, GraphElement]:
        r"""Run the agent to extract node and relationship information.

        Args:
            element (Union[str, Element]): The input element or string.
            parse_graph_elements (bool, optional): Whether to parse into
                `GraphElement`. Defaults to `False`.

        Returns:
            Union[str, GraphElement]: The extracted node and relationship
                information. If `parse_graph_elements` is `True` then return
                `GraphElement`, else return `str`.
        """
        # Start from a clean conversation for every extraction.
        self.reset()
        # Kept for `_parse_graph_elements`, which attaches the source
        # element to the resulting `GraphElement`.
        self.element = element

        knowledge_graph_prompt = TextPrompt(text_prompt)
        knowledge_graph_generation = knowledge_graph_prompt.format(
            task=str(element)
        )

        knowledge_graph_generation_msg = BaseMessage.make_user_message(
            role_name="Graphify", content=knowledge_graph_generation
        )

        response = self.step(input_message=knowledge_graph_generation_msg)

        content = response.msg.content

        if parse_graph_elements:
            content = self._parse_graph_elements(content)

        return content

    def _validate_node(self, node: Node) -> bool:
        r"""Validate if the object is a valid Node.

        Args:
            node (Node): Object to be validated.

        Returns:
            bool: True if the object is a valid Node, False otherwise.
        """
        return (
            isinstance(node, Node)
            and isinstance(node.id, (str, int))
            and isinstance(node.type, str)
        )

    def _validate_relationship(self, relationship: Relationship) -> bool:
        r"""Validate if the object is a valid Relationship.

        Args:
            relationship (Relationship): Object to be validated.

        Returns:
            bool: True if the object is a valid Relationship, False otherwise.
        """
        return (
            isinstance(relationship, Relationship)
            and self._validate_node(relationship.subj)
            and self._validate_node(relationship.obj)
            and isinstance(relationship.type, str)
        )

    def _parse_graph_elements(self, input_string: str) -> GraphElement:
        r"""Parses graph elements from given content.

        Args:
            input_string (str): The input content (model output).

        Returns:
            GraphElement: The parsed graph elements.
        """
        import ast
        import re

        # Regular expressions to extract nodes and relationships.
        node_pattern = r"Node\(id='(.*?)', type='(.*?)', properties=(.*?)\)"
        rel_pattern = (
            r"Relationship\(subj=Node\(id='(.*?)', type='(.*?)'\), "
            r"obj=Node\(id='(.*?)', type='(.*?)'\), type='(.*?)', "
            r"properties=\{(.*?)\}\)"
        )

        nodes = {}
        relationships = []

        # Extract nodes. `input_string` is untrusted model output, so the
        # properties literal is parsed with `ast.literal_eval` rather than
        # `eval` — same result for literals, but no arbitrary code execution.
        for match in re.finditer(node_pattern, input_string):
            node_id, node_type, properties_str = match.groups()
            properties = ast.literal_eval(properties_str)
            if node_id not in nodes:
                node = Node(node_id, node_type, properties)
                if self._validate_node(node):
                    nodes[node_id] = node

        # Extract relationships; only keep those whose endpoints were
        # successfully parsed as nodes above.
        for match in re.finditer(rel_pattern, input_string):
            subj_id, subj_type, obj_id, obj_type, rel_type, properties_str = (
                match.groups()
            )
            # NOTE(review): unlike the node pattern, the braces are stripped
            # here before parsing, so a single-item set parses as a plain
            # string — preserved from the original behavior.
            properties = ast.literal_eval(properties_str)
            if subj_id in nodes and obj_id in nodes:
                subj = nodes[subj_id]
                obj = nodes[obj_id]
                relationship = Relationship(subj, obj, rel_type, properties)
                if self._validate_relationship(relationship):
                    relationships.append(relationship)

        return GraphElement(list(nodes.values()), relationships, self.element)