camel-ai 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic. Click here for more details.

Files changed (75) hide show
  1. camel/__init__.py +30 -0
  2. camel/agents/__init__.py +40 -0
  3. camel/agents/base.py +29 -0
  4. camel/agents/chat_agent.py +539 -0
  5. camel/agents/critic_agent.py +179 -0
  6. camel/agents/embodied_agent.py +138 -0
  7. camel/agents/role_assignment_agent.py +117 -0
  8. camel/agents/task_agent.py +382 -0
  9. camel/agents/tool_agents/__init__.py +20 -0
  10. camel/agents/tool_agents/base.py +40 -0
  11. camel/agents/tool_agents/hugging_face_tool_agent.py +203 -0
  12. camel/configs.py +159 -0
  13. camel/embeddings/__init__.py +20 -0
  14. camel/embeddings/base.py +65 -0
  15. camel/embeddings/openai_embedding.py +74 -0
  16. camel/functions/__init__.py +27 -0
  17. camel/functions/base_io_functions.py +261 -0
  18. camel/functions/math_functions.py +61 -0
  19. camel/functions/openai_function.py +88 -0
  20. camel/functions/search_functions.py +309 -0
  21. camel/functions/unstructured_io_fuctions.py +616 -0
  22. camel/functions/weather_functions.py +136 -0
  23. camel/generators.py +263 -0
  24. camel/human.py +130 -0
  25. camel/memories/__init__.py +28 -0
  26. camel/memories/base.py +75 -0
  27. camel/memories/chat_history_memory.py +111 -0
  28. camel/memories/context_creators/__init__.py +18 -0
  29. camel/memories/context_creators/base.py +72 -0
  30. camel/memories/context_creators/score_based.py +130 -0
  31. camel/memories/records.py +92 -0
  32. camel/messages/__init__.py +38 -0
  33. camel/messages/base.py +223 -0
  34. camel/messages/func_message.py +106 -0
  35. camel/models/__init__.py +26 -0
  36. camel/models/base_model.py +110 -0
  37. camel/models/model_factory.py +59 -0
  38. camel/models/open_source_model.py +144 -0
  39. camel/models/openai_model.py +103 -0
  40. camel/models/stub_model.py +106 -0
  41. camel/prompts/__init__.py +38 -0
  42. camel/prompts/ai_society.py +121 -0
  43. camel/prompts/base.py +227 -0
  44. camel/prompts/code.py +111 -0
  45. camel/prompts/evaluation.py +40 -0
  46. camel/prompts/misalignment.py +84 -0
  47. camel/prompts/prompt_templates.py +117 -0
  48. camel/prompts/role_description_prompt_template.py +53 -0
  49. camel/prompts/solution_extraction.py +44 -0
  50. camel/prompts/task_prompt_template.py +56 -0
  51. camel/prompts/translation.py +42 -0
  52. camel/responses/__init__.py +18 -0
  53. camel/responses/agent_responses.py +42 -0
  54. camel/societies/__init__.py +20 -0
  55. camel/societies/babyagi_playing.py +254 -0
  56. camel/societies/role_playing.py +456 -0
  57. camel/storages/__init__.py +23 -0
  58. camel/storages/key_value_storages/__init__.py +23 -0
  59. camel/storages/key_value_storages/base.py +57 -0
  60. camel/storages/key_value_storages/in_memory.py +51 -0
  61. camel/storages/key_value_storages/json.py +97 -0
  62. camel/terminators/__init__.py +23 -0
  63. camel/terminators/base.py +44 -0
  64. camel/terminators/response_terminator.py +118 -0
  65. camel/terminators/token_limit_terminator.py +55 -0
  66. camel/types/__init__.py +54 -0
  67. camel/types/enums.py +176 -0
  68. camel/types/openai_types.py +39 -0
  69. camel/utils/__init__.py +47 -0
  70. camel/utils/commons.py +243 -0
  71. camel/utils/python_interpreter.py +435 -0
  72. camel/utils/token_counting.py +220 -0
  73. camel_ai-0.1.1.dist-info/METADATA +311 -0
  74. camel_ai-0.1.1.dist-info/RECORD +75 -0
  75. camel_ai-0.1.1.dist-info/WHEEL +4 -0
@@ -0,0 +1,179 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ import random
15
+ import warnings
16
+ from typing import Any, Dict, Optional, Sequence
17
+
18
+ from colorama import Fore
19
+
20
+ from camel.agents import ChatAgent
21
+ from camel.memories import BaseMemory
22
+ from camel.messages import BaseMessage
23
+ from camel.responses import ChatAgentResponse
24
+ from camel.types import ModelType
25
+ from camel.utils import get_first_int, print_text_animated
26
+
27
+
28
class CriticAgent(ChatAgent):
    r"""A class for the critic agent that assists in selecting an option.

    Args:
        system_message (BaseMessage): The system message for the critic
            agent.
        model_type (ModelType, optional): The LLM model to use for generating
            responses. (default :obj:`ModelType.GPT_3_5_TURBO`)
        model_config (Any, optional): Configuration options for the LLM model.
            (default: :obj:`None`)
        memory (BaseMemory, optional): The memory backing the agent.
            (default: :obj:`None`)
        message_window_size (int, optional): The maximum number of previous
            messages to include in the context window. If `None`, no windowing
            is performed. (default: :obj:`6`)
        retry_attempts (int, optional): The number of retry attempts if the
            critic fails to return a valid option. (default: :obj:`2`)
        verbose (bool, optional): Whether to print the critic's messages.
        logger_color (Any): The color of the menu options displayed to the
            user. (default: :obj:`Fore.MAGENTA`)
    """

    def __init__(
        self,
        system_message: BaseMessage,
        model_type: ModelType = ModelType.GPT_3_5_TURBO,
        model_config: Optional[Any] = None,
        memory: Optional[BaseMemory] = None,
        message_window_size: int = 6,
        retry_attempts: int = 2,
        verbose: bool = False,
        logger_color: Any = Fore.MAGENTA,
    ) -> None:
        super().__init__(system_message, model_type=model_type,
                         model_config=model_config, memory=memory,
                         message_window_size=message_window_size)
        # Maps menu indices ("1", "2", ...) to the option text they stand for.
        self.options_dict: Dict[str, str] = dict()
        self.retry_attempts = retry_attempts
        self.verbose = verbose
        self.logger_color = logger_color

    def flatten_options(self, messages: Sequence[BaseMessage]) -> str:
        r"""Flattens the options to the critic.

        Args:
            messages (Sequence[BaseMessage]): A list of `BaseMessage` objects.

        Returns:
            str: A string containing the flattened options to the critic.
        """
        # Reset the mapping so stale entries from a previous call cannot be
        # selected and the advertised [1-N] range matches this menu.
        self.options_dict = dict()
        options = [message.content for message in messages]
        flatten_options = (
            f"> Proposals from "
            f"{messages[0].role_name} ({messages[0].role_type}). "
            "Please choose an option:\n")
        for index, option in enumerate(options):
            flatten_options += f"Option {index + 1}:\n{option}\n\n"
            self.options_dict[str(index + 1)] = option
        # Renamed from `format` to avoid shadowing the builtin.
        answer_format = (
            f"Please first enter your choice ([1-{len(self.options_dict)}]) "
            "and then your explanation and comparison: ")
        return flatten_options + answer_format

    def get_option(self, input_message: BaseMessage) -> str:
        r"""Gets the option selected by the critic.

        Args:
            input_message (BaseMessage): A `BaseMessage` object representing
                the input message.

        Returns:
            str: The option selected by the critic. Falls back to a random
                option when no valid choice is obtained within
                `retry_attempts` attempts.

        Raises:
            RuntimeError: If the critic step returns no messages or
                terminates.
        """
        # TODO: Add support for editing options by the critic.
        msg_content = input_message.content
        i = 0
        while i < self.retry_attempts:
            critic_response = self.step(input_message)

            if critic_response.msgs is None or len(critic_response.msgs) == 0:
                raise RuntimeError("Got None critic messages.")
            if critic_response.terminated:
                raise RuntimeError("Critic step failed.")

            critic_msg = critic_response.msg
            self.record_message(critic_msg)
            if self.verbose:
                print_text_animated(self.logger_color + "\n> Critic response: "
                                    f"\x1b[3m{critic_msg.content}\x1b[0m\n")
            choice = self.parse_critic(critic_msg)

            if choice in self.options_dict:
                return self.options_dict[choice]
            else:
                # Re-ask with the original menu prepended by an error notice.
                input_message = BaseMessage(
                    role_name=input_message.role_name,
                    role_type=input_message.role_type,
                    meta_dict=input_message.meta_dict,
                    content="> Invalid choice. Please choose again.\n" +
                    msg_content,
                )
            i += 1
        warnings.warn("Critic failed to get a valid option. "
                      f"After {self.retry_attempts} attempts. "
                      "Returning a random option.")
        return random.choice(list(self.options_dict.values()))

    def parse_critic(self, critic_msg: BaseMessage) -> Optional[str]:
        r"""Parses the critic's message and extracts the choice.

        Args:
            critic_msg (BaseMessage): A `BaseMessage` object representing the
                critic's response.

        Returns:
            Optional[str]: The critic's choice as a string, or None if the
                message could not be parsed.
        """
        first_int = get_first_int(critic_msg.content)
        # Return None (not the string "None") when no integer is present,
        # honoring the Optional[str] contract used by `get_option`.
        return str(first_int) if first_int is not None else None

    def reduce_step(
        self,
        input_messages: Sequence[BaseMessage],
    ) -> ChatAgentResponse:
        r"""Performs one step of the conversation by flattening options to the
        critic, getting the option, and parsing the choice.

        Args:
            input_messages (Sequence[BaseMessage]): A list of BaseMessage
                objects.

        Returns:
            ChatAgentResponse: A `ChatAgentResponse` object includes the
                critic's choice.
        """
        # Template message carrying the role metadata of the proposals.
        meta_chat_message = BaseMessage(
            role_name=input_messages[0].role_name,
            role_type=input_messages[0].role_type,
            meta_dict=input_messages[0].meta_dict,
            content="",
        )

        flatten_options = self.flatten_options(input_messages)
        if self.verbose:
            print_text_animated(self.logger_color +
                                f"\x1b[3m{flatten_options}\x1b[0m\n")
        input_msg = meta_chat_message.create_new_instance(flatten_options)

        option = self.get_option(input_msg)
        output_msg = meta_chat_message.create_new_instance(option)

        # TODO: The return `info` can be improved.
        return ChatAgentResponse([output_msg], terminated=False, info={})
@@ -0,0 +1,138 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ from typing import Any, Dict, List, Optional
15
+
16
+ from colorama import Fore
17
+
18
+ from camel.agents import BaseToolAgent, ChatAgent, HuggingFaceToolAgent
19
+ from camel.messages import BaseMessage
20
+ from camel.responses import ChatAgentResponse
21
+ from camel.types import ModelType
22
+ from camel.utils import PythonInterpreter, print_text_animated
23
+
24
+
25
class EmbodiedAgent(ChatAgent):
    r"""Class for managing conversations of CAMEL Embodied Agents.

    Args:
        system_message (BaseMessage): The system message for the chat agent.
            Its content must contain an ``{action_space}`` placeholder, which
            is filled with the action-space prompt.
        model_type (ModelType, optional): The LLM model to use for generating
            responses. (default :obj:`ModelType.GPT_4`)
        model_config (Any, optional): Configuration options for the LLM model.
            (default: :obj:`None`)
        message_window_size (int, optional): The maximum number of previous
            messages to include in the context window. If `None`, no windowing
            is performed. (default: :obj:`None`)
        action_space (List[BaseToolAgent], optional): The action space for the
            embodied agent. (default: :obj:`None`)
        verbose (bool, optional): Whether to print the critic's messages.
        logger_color (Any): The color of the logger displayed to the user.
            (default: :obj:`Fore.MAGENTA`)
    """

    def __init__(
        self,
        system_message: BaseMessage,
        model_type: ModelType = ModelType.GPT_4,
        model_config: Optional[Any] = None,
        message_window_size: Optional[int] = None,
        action_space: Optional[List[BaseToolAgent]] = None,
        verbose: bool = False,
        logger_color: Any = Fore.MAGENTA,
    ) -> None:
        # Only build the (potentially expensive) default tool agent when the
        # caller did not supply a non-empty action space; the original
        # constructed it unconditionally and then discarded it.
        if not action_space:
            action_space = [
                HuggingFaceToolAgent('hugging_face_tool_agent',
                                     model_type=model_type.value),
            ]
        self.action_space = action_space
        action_space_prompt = self.get_action_space_prompt()
        system_message.content = system_message.content.format(
            action_space=action_space_prompt)
        self.verbose = verbose
        self.logger_color = logger_color
        super().__init__(
            system_message=system_message,
            model_type=model_type,
            model_config=model_config,
            message_window_size=message_window_size,
        )

    def get_action_space_prompt(self) -> str:
        r"""Returns the action space prompt.

        Returns:
            str: The action space prompt.
        """
        return "\n".join([
            f"*** {action.name} ***:\n {action.description}"
            for action in self.action_space
        ])

    def step(
        self,
        input_message: BaseMessage,
    ) -> ChatAgentResponse:
        r"""Performs a step in the conversation.

        Args:
            input_message (BaseMessage): The input message.

        Returns:
            ChatAgentResponse: A struct containing the output messages,
                a boolean indicating whether the chat session has terminated,
                and information about the chat session.

        Raises:
            RuntimeError: If the underlying step returns no messages or
                terminates.
        """
        response = super().step(input_message)

        if response.msgs is None or len(response.msgs) == 0:
            raise RuntimeError("Got None output messages.")
        if response.terminated:
            raise RuntimeError(f"{self.__class__.__name__} step failed.")

        # NOTE: Only single output messages are supported
        explanations, codes = response.msg.extract_text_and_code_prompts()

        if self.verbose:
            for explanation, code in zip(explanations, codes):
                print_text_animated(self.logger_color +
                                    f"> Explanation:\n{explanation}")
                print_text_animated(self.logger_color + f"> Code:\n{code}")

            # zip() above stops at the shorter sequence; print each trailing
            # explanation individually instead of dumping the whole list.
            for explanation in explanations[len(codes):]:
                print_text_animated(self.logger_color +
                                    f"> Explanation:\n{explanation}")

        content = response.msg.content

        # Replace the content with execution results only when there is code
        # to execute. The previous `codes is not None` check was always true
        # (codes is a list), discarding the response even without any code.
        if codes:
            content = "\n> Executed Results:"
            action_space: Dict[str, Any] = {
                action.name: action
                for action in self.action_space
            }
            action_space.update({"print": print, "enumerate": enumerate})
            interpreter = PythonInterpreter(action_space=action_space)
            for block_idx, code in enumerate(codes):
                executed_outputs, _ = code.execute(interpreter)
                content += (f"Executing code block {block_idx}:\n"
                            f" - execution output:\n{executed_outputs}\n"
                            f" - Local variables:\n{interpreter.state}\n")
                content += "*" * 50 + "\n"

        # TODO: Handle errors
        content = input_message.content + (Fore.RESET +
                                           f"\n> Embodied Actions:\n{content}")
        message = BaseMessage(input_message.role_name, input_message.role_type,
                              input_message.meta_dict, content)
        return ChatAgentResponse([message], response.terminated, response.info)
@@ -0,0 +1,117 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ import re
15
+ from typing import Any, Dict, Optional, Union
16
+
17
+ from camel.agents import ChatAgent
18
+ from camel.messages import BaseMessage
19
+ from camel.prompts import TextPrompt
20
+ from camel.types import ModelType, RoleType
21
+
22
+
23
class RoleAssignmentAgent(ChatAgent):
    r"""An agent that generates role names based on the task prompt.

    Args:
        model_type (ModelType, optional): The type of model to use for the
            agent. (default: :obj:`ModelType.GPT_3_5_TURBO`)
        model_config (Any, optional): The configuration for the model.
            (default: :obj:`None`)
    """

    def __init__(
        self,
        model_type: ModelType = ModelType.GPT_3_5_TURBO,
        model_config: Optional[Any] = None,
    ) -> None:
        system_message = BaseMessage(
            role_name="Role Assigner",
            role_type=RoleType.ASSISTANT,
            meta_dict=None,
            content="You assign roles based on tasks.",
        )
        super().__init__(system_message, model_type, model_config)

    def run(
        self,
        task_prompt: Union[str, TextPrompt],
        num_roles: int = 2,
    ) -> Dict[str, str]:
        r"""Generate role names based on the input task prompt.

        Args:
            task_prompt (Union[str, TextPrompt]): The prompt
                for the task based on which the roles are to be generated.
            num_roles (int, optional): The number of roles to generate.
                (default: :obj:`2`)

        Returns:
            Dict[str, str]: A dictionary mapping role names to their
                descriptions.

        Raises:
            RuntimeError: If the model output cannot be parsed into exactly
                ``num_roles`` role names/descriptions, or the step terminated.
        """
        self.reset()

        # Answer template the model is instructed to fill in.
        expert_prompt = "===== ANSWER PROMPT =====\n" + "\n".join(
            f"Domain expert {i + 1}: <BLANK>\n"
            f"Associated competencies, characteristics, duties "
            f"and workflows: <BLANK>. End." for i in range(num_roles))
        role_assignment_generation_prompt = TextPrompt(
            "You are a role assignment agent, and you're in charge of " +
            "recruiting {num_roles} experts for the following task." +
            "\n==== TASK =====\n {task}\n\n" +
            "Identify the domain experts you'd recruit and detail their " +
            "associated competencies, characteristics, duties and workflows " +
            "to complete the task.\n " +
            "Your answer MUST adhere to the format of ANSWER PROMPT, and " +
            "ONLY answer the BLANKs.\n" + expert_prompt)
        role_assignment_generation = role_assignment_generation_prompt.format(
            num_roles=num_roles, task=task_prompt)

        role_assignment_generation_msg = BaseMessage.make_user_message(
            role_name="Role Assigner", content=role_assignment_generation)

        response = self.step(input_message=role_assignment_generation_msg)

        msg = response.msg  # type: BaseMessage
        terminated = response.terminated

        # Distribute the output completions into role names and descriptions.
        # `\d+` (not `\d`) so indices of 10 or more still match, and the
        # literal dot in "End." is escaped so it cannot match any character.
        role_names = [
            desc.replace("<|", "").replace("|>", "") for desc in re.findall(
                r"Domain expert \d+: (.+?)\nAssociated competencies,",
                msg.content,
                re.DOTALL,
            )
        ]
        role_descriptions = [
            desc.replace("<|", "").replace("|>", "") for desc in re.findall(
                r"Associated competencies, characteristics, "
                r"duties and workflows: (.+?) End\.", msg.content, re.DOTALL)
        ]

        if len(role_names) != num_roles or len(role_descriptions) != num_roles:
            raise RuntimeError(
                "Got None or insufficient information of roles.")
        if terminated:
            raise RuntimeError("Role assignment failed.")

        role_descriptions_dict = {
            role_name: description
            for role_name, description in zip(role_names, role_descriptions)
        }

        return role_descriptions_dict