camel-ai 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai might be problematic.

Files changed (75)
  1. camel/__init__.py +30 -0
  2. camel/agents/__init__.py +40 -0
  3. camel/agents/base.py +29 -0
  4. camel/agents/chat_agent.py +539 -0
  5. camel/agents/critic_agent.py +179 -0
  6. camel/agents/embodied_agent.py +138 -0
  7. camel/agents/role_assignment_agent.py +117 -0
  8. camel/agents/task_agent.py +382 -0
  9. camel/agents/tool_agents/__init__.py +20 -0
  10. camel/agents/tool_agents/base.py +40 -0
  11. camel/agents/tool_agents/hugging_face_tool_agent.py +203 -0
  12. camel/configs.py +159 -0
  13. camel/embeddings/__init__.py +20 -0
  14. camel/embeddings/base.py +65 -0
  15. camel/embeddings/openai_embedding.py +74 -0
  16. camel/functions/__init__.py +27 -0
  17. camel/functions/base_io_functions.py +261 -0
  18. camel/functions/math_functions.py +61 -0
  19. camel/functions/openai_function.py +88 -0
  20. camel/functions/search_functions.py +309 -0
  21. camel/functions/unstructured_io_fuctions.py +616 -0
  22. camel/functions/weather_functions.py +136 -0
  23. camel/generators.py +263 -0
  24. camel/human.py +130 -0
  25. camel/memories/__init__.py +28 -0
  26. camel/memories/base.py +75 -0
  27. camel/memories/chat_history_memory.py +111 -0
  28. camel/memories/context_creators/__init__.py +18 -0
  29. camel/memories/context_creators/base.py +72 -0
  30. camel/memories/context_creators/score_based.py +130 -0
  31. camel/memories/records.py +92 -0
  32. camel/messages/__init__.py +38 -0
  33. camel/messages/base.py +223 -0
  34. camel/messages/func_message.py +106 -0
  35. camel/models/__init__.py +26 -0
  36. camel/models/base_model.py +110 -0
  37. camel/models/model_factory.py +59 -0
  38. camel/models/open_source_model.py +144 -0
  39. camel/models/openai_model.py +103 -0
  40. camel/models/stub_model.py +106 -0
  41. camel/prompts/__init__.py +38 -0
  42. camel/prompts/ai_society.py +121 -0
  43. camel/prompts/base.py +227 -0
  44. camel/prompts/code.py +111 -0
  45. camel/prompts/evaluation.py +40 -0
  46. camel/prompts/misalignment.py +84 -0
  47. camel/prompts/prompt_templates.py +117 -0
  48. camel/prompts/role_description_prompt_template.py +53 -0
  49. camel/prompts/solution_extraction.py +44 -0
  50. camel/prompts/task_prompt_template.py +56 -0
  51. camel/prompts/translation.py +42 -0
  52. camel/responses/__init__.py +18 -0
  53. camel/responses/agent_responses.py +42 -0
  54. camel/societies/__init__.py +20 -0
  55. camel/societies/babyagi_playing.py +254 -0
  56. camel/societies/role_playing.py +456 -0
  57. camel/storages/__init__.py +23 -0
  58. camel/storages/key_value_storages/__init__.py +23 -0
  59. camel/storages/key_value_storages/base.py +57 -0
  60. camel/storages/key_value_storages/in_memory.py +51 -0
  61. camel/storages/key_value_storages/json.py +97 -0
  62. camel/terminators/__init__.py +23 -0
  63. camel/terminators/base.py +44 -0
  64. camel/terminators/response_terminator.py +118 -0
  65. camel/terminators/token_limit_terminator.py +55 -0
  66. camel/types/__init__.py +54 -0
  67. camel/types/enums.py +176 -0
  68. camel/types/openai_types.py +39 -0
  69. camel/utils/__init__.py +47 -0
  70. camel/utils/commons.py +243 -0
  71. camel/utils/python_interpreter.py +435 -0
  72. camel/utils/token_counting.py +220 -0
  73. camel_ai-0.1.1.dist-info/METADATA +311 -0
  74. camel_ai-0.1.1.dist-info/RECORD +75 -0
  75. camel_ai-0.1.1.dist-info/WHEEL +4 -0
@@ -0,0 +1,220 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the “License”);
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an “AS IS” BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ from abc import ABC, abstractmethod
15
+ from typing import List
16
+
17
+ from camel.messages import OpenAIMessage
18
+ from camel.types import ModelType
19
+
20
+
21
+ def messages_to_prompt(messages: List[OpenAIMessage], model: ModelType) -> str:
22
+ r"""Parse the message list into a single prompt following model-specific
23
+ formats.
24
+
25
+ Args:
26
+ messages (List[OpenAIMessage]): Message list with the chat history
27
+ in OpenAI API format.
28
+ model (ModelType): Model type for which messages will be parsed.
29
+
30
+ Returns:
31
+ str: A single prompt summarizing all the messages.
32
+ """
33
+ system_message = messages[0]["content"]
34
+
35
+ ret: str
36
+ if model == ModelType.LLAMA_2:
37
+ # reference: https://github.com/facebookresearch/llama/blob/cfc3fc8c1968d390eb830e65c63865e980873a06/llama/generation.py#L212
38
+ seps = [" ", " </s><s>"]
39
+ role_map = {"user": "[INST]", "assistant": "[/INST]"}
40
+
41
+ system_prompt = f"[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n"
42
+ ret = ""
43
+ for i, msg in enumerate(messages[1:]):
44
+ role = role_map[msg["role"]]
45
+ content = msg["content"]
46
+ if content:
47
+ if not isinstance(content, str):
48
+ raise ValueError("Currently multimodal context is not "
49
+ "supported by the token counter.")
50
+ if i == 0:
51
+ ret += system_prompt + content
52
+ else:
53
+ ret += role + " " + content + seps[i % 2]
54
+ else:
55
+ ret += role
56
+ return ret
57
+ elif model == ModelType.VICUNA or model == ModelType.VICUNA_16K:
58
+ seps = [" ", "</s>"]
59
+ role_map = {"user": "USER", "assistant": "ASSISTANT"}
60
+
61
+ system_prompt = f"{system_message}"
62
+ ret = system_prompt + seps[0]
63
+ for i, msg in enumerate(messages[1:]):
64
+ role = role_map[msg["role"]]
65
+ content = msg["content"]
66
+ if not isinstance(content, str):
67
+ raise ValueError("Currently multimodal context is not "
68
+ "supported by the token counter.")
69
+ if content:
70
+ ret += role + ": " + content + seps[i % 2]
71
+ else:
72
+ ret += role + ":"
73
+ return ret
74
+ else:
75
+ raise ValueError(f"Invalid model type: {model}")
76
+
77
+
78
+ def get_model_encoding(value_for_tiktoken: str):
79
+ r"""Get model encoding from tiktoken.
80
+
81
+ Args:
82
+ value_for_tiktoken: Model value for tiktoken.
83
+
84
+ Returns:
85
+ tiktoken.Encoding: Model encoding.
86
+ """
87
+ import tiktoken
88
+ try:
89
+ encoding = tiktoken.encoding_for_model(value_for_tiktoken)
90
+ except KeyError:
91
+ print("Model not found. Using cl100k_base encoding.")
92
+ encoding = tiktoken.get_encoding("cl100k_base")
93
+ return encoding
94
+
95
+
96
+ class BaseTokenCounter(ABC):
97
+ r"""Base class for token counters of different kinds of models."""
98
+
99
+ @abstractmethod
100
+ def count_tokens_from_messages(self, messages: List[OpenAIMessage]) -> int:
101
+ r"""Count number of tokens in the provided message list.
102
+
103
+ Args:
104
+ messages (List[OpenAIMessage]): Message list with the chat history
105
+ in OpenAI API format.
106
+
107
+ Returns:
108
+ int: Number of tokens in the messages.
109
+ """
110
+ pass
111
+
112
+
113
+ class OpenSourceTokenCounter(BaseTokenCounter):
114
+
115
+ def __init__(self, model_type: ModelType, model_path: str):
116
+ r"""Constructor for the token counter for open-source models.
117
+
118
+ Args:
119
+ model_type (ModelType): Model type for which tokens will be
120
+ counted.
121
+ model_path (str): The path to the model files, where the tokenizer
122
+ model should be located.
123
+ """
124
+
125
+ # Use a fast Rust-based tokenizer if it is supported for a given model.
126
+ # If a fast tokenizer is not available for a given model,
127
+ # a normal Python-based tokenizer is returned instead.
128
+ from transformers import AutoTokenizer
129
+ try:
130
+ tokenizer = AutoTokenizer.from_pretrained(
131
+ model_path,
132
+ use_fast=True,
133
+ )
134
+ except TypeError:
135
+ tokenizer = AutoTokenizer.from_pretrained(
136
+ model_path,
137
+ use_fast=False,
138
+ )
139
+ except:
140
+ raise ValueError(
141
+ f"Invalid `model_path` ({model_path}) is provided. "
142
+ "Tokenizer loading failed.")
143
+
144
+ self.tokenizer = tokenizer
145
+ self.model_type = model_type
146
+
147
+ def count_tokens_from_messages(self, messages: List[OpenAIMessage]) -> int:
148
+ r"""Count number of tokens in the provided message list using
149
+ loaded tokenizer specific for this type of model.
150
+
151
+ Args:
152
+ messages (List[OpenAIMessage]): Message list with the chat history
153
+ in OpenAI API format.
154
+
155
+ Returns:
156
+ int: Number of tokens in the messages.
157
+ """
158
+ prompt = messages_to_prompt(messages, self.model_type)
159
+ input_ids = self.tokenizer(prompt).input_ids
160
+
161
+ return len(input_ids)
162
+
163
+
164
+ class OpenAITokenCounter(BaseTokenCounter):
165
+
166
+ def __init__(self, model: ModelType):
167
+ r"""Constructor for the token counter for OpenAI models.
168
+
169
+ Args:
170
+ model (ModelType): Model type for which tokens will be
171
+ counted.
172
+ """
173
+ self.model: str = model.value_for_tiktoken
174
+
175
+ self.tokens_per_message: int
176
+ self.tokens_per_name: int
177
+
178
+ if self.model == "gpt-3.5-turbo-0301":
179
+ # Every message follows <|start|>{role/name}\n{content}<|end|>\n
180
+ self.tokens_per_message = 4
181
+ # If there's a name, the role is omitted
182
+ self.tokens_per_name = -1
183
+ elif ("gpt-3.5-turbo" in self.model) or ("gpt-4" in self.model):
184
+ self.tokens_per_message = 3
185
+ self.tokens_per_name = 1
186
+ else:
187
+ # flake8: noqa :E501
188
+ raise NotImplementedError(
189
+ "Token counting for OpenAI Models is not presently "
190
+ f"implemented for model {model}. "
191
+ "See https://github.com/openai/openai-python/blob/main/chatml.md "
192
+ "for information on how messages are converted to tokens. "
193
+ "See https://platform.openai.com/docs/models/gpt-4"
194
+ "or https://platform.openai.com/docs/models/gpt-3-5"
195
+ "for information about openai chat models.")
196
+
197
+ self.encoding = get_model_encoding(self.model)
198
+
199
+ def count_tokens_from_messages(self, messages: List[OpenAIMessage]) -> int:
200
+ r"""Count number of tokens in the provided message list with the
201
+ help of package tiktoken.
202
+
203
+ Args:
204
+ messages (List[OpenAIMessage]): Message list with the chat history
205
+ in OpenAI API format.
206
+
207
+ Returns:
208
+ int: Number of tokens in the messages.
209
+ """
210
+ num_tokens = 0
211
+ for message in messages:
212
+ num_tokens += self.tokens_per_message
213
+ for key, value in message.items():
214
+ num_tokens += len(self.encoding.encode(str(value)))
215
+ if key == "name":
216
+ num_tokens += self.tokens_per_name
217
+
218
+ # every reply is primed with <|start|>assistant<|message|>
219
+ num_tokens += 3
220
+ return num_tokens
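A minimal usage sketch of the helpers defined above (illustrative only, not part of the packaged file): it assumes `tiktoken` is installed and that `ModelType.GPT_3_5_TURBO` and `ModelType.LLAMA_2` are the relevant members of `camel.types.ModelType` in this release.

```python
# Illustrative sketch; the import path mirrors the file shown above
# (camel/utils/token_counting.py). Assumes tiktoken is installed.
from camel.types import ModelType
from camel.utils.token_counting import OpenAITokenCounter, messages_to_prompt

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarize the CAMEL paper in one sentence."},
]

# Count tokens the way the OpenAI chat API counts them.
counter = OpenAITokenCounter(ModelType.GPT_3_5_TURBO)
print(counter.count_tokens_from_messages(messages))

# Flatten the same history into a LLaMA-2 style prompt string.
print(messages_to_prompt(messages, ModelType.LLAMA_2))
```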
@@ -0,0 +1,311 @@
1
+ Metadata-Version: 2.1
2
+ Name: camel-ai
3
+ Version: 0.1.1
4
+ Summary: Communicative Agents for AI Society Study
5
+ Home-page: https://www.camel-ai.org/
6
+ License: Apache-2.0
7
+ Keywords: communicative-ai,ai-societies,artificial-intelligence,deep-learning,multi-agent-systems,cooperative-ai,natural-language-processing,large-language-models
8
+ Author: CAMEL-AI.org
9
+ Requires-Python: >=3.8.1,<3.12
10
+ Classifier: License :: OSI Approved :: Apache Software License
11
+ Classifier: Programming Language :: Python :: 3
12
+ Classifier: Programming Language :: Python :: 3.9
13
+ Classifier: Programming Language :: Python :: 3.10
14
+ Classifier: Programming Language :: Python :: 3.11
15
+ Provides-Extra: all
16
+ Provides-Extra: huggingface-agent
17
+ Provides-Extra: test
18
+ Provides-Extra: tools
19
+ Requires-Dist: PyMuPDF (>=1.22.5,<2.0.0) ; extra == "tools" or extra == "all"
20
+ Requires-Dist: accelerate (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
21
+ Requires-Dist: argilla (>=1.19.0,<2.0.0) ; extra == "tools" or extra == "all"
22
+ Requires-Dist: beautifulsoup4 (>=4,<5) ; extra == "tools" or extra == "all"
23
+ Requires-Dist: colorama (>=0,<1)
24
+ Requires-Dist: datasets (>=2,<3) ; extra == "huggingface-agent" or extra == "all"
25
+ Requires-Dist: diffusers (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
26
+ Requires-Dist: docx2txt (>=0.8,<0.9) ; extra == "tools" or extra == "all"
27
+ Requires-Dist: jsonschema (>=4,<5)
28
+ Requires-Dist: mock (>=5,<6) ; extra == "test"
29
+ Requires-Dist: numpy (>=1,<2)
30
+ Requires-Dist: openai (>=1.2.3,<2.0.0)
31
+ Requires-Dist: opencv-python (>=4,<5) ; extra == "huggingface-agent" or extra == "all"
32
+ Requires-Dist: protobuf (>=4,<5)
33
+ Requires-Dist: pyowm (>=3.3.0,<4.0.0) ; extra == "tools" or extra == "all"
34
+ Requires-Dist: pytest (>=7,<8) ; extra == "test"
35
+ Requires-Dist: sentencepiece (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
36
+ Requires-Dist: soundfile (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
37
+ Requires-Dist: tiktoken (>=0,<1)
38
+ Requires-Dist: torch (>=1,<2) ; extra == "huggingface-agent" or extra == "all"
39
+ Requires-Dist: transformers (>=4,<5) ; extra == "huggingface-agent" or extra == "all"
40
+ Requires-Dist: unstructured (>=0.10.30,<0.11.0) ; extra == "tools" or extra == "all"
41
+ Requires-Dist: wikipedia (>=1,<2) ; extra == "tools" or extra == "all"
42
+ Project-URL: Documentation, https://docs.camel-ai.org
43
+ Project-URL: Repository, https://github.com/camel-ai/camel
44
+ Description-Content-Type: text/markdown
45
+
46
+ <div style="left">
47
+ <a href="https://colab.research.google.com/drive/1AzP33O8rnMW__7ocWJhVBXjKziJXPtim?usp=sharing" target="_blank">
48
+ <img alt="Open In Colab" src="https://colab.research.google.com/assets/colab-badge.svg" />
49
+ </a>
50
+ <a href="https://huggingface.co/camel-ai" target="_blank">
51
+ <img alt="Hugging Face" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-CAMEL--AI-ffc107?color=ffc107&logoColor=white" />
52
+ </a>
53
+ <a href="https://join.slack.com/t/camel-kwr1314/shared_invite/zt-1vy8u9lbo-ZQmhIAyWSEfSwLCl2r2eKA" target="_blank">
54
+ <img alt="Slack" src="https://img.shields.io/badge/Slack-CAMEL--AI-blueviolet?logo=slack" />
55
+ </a>
56
+ <a href="https://discord.gg/CNcNpquyDc" target="_blank">
57
+ <img alt="Discord" src="https://img.shields.io/badge/Discord-CAMEL--AI-7289da?logo=discord&logoColor=white&color=7289da" />
58
+ </a>
59
+ <a href="https://ghli.org/camel/wechat.png" target="_blank">
60
+ <img alt="WeChat" src="https://img.shields.io/badge/WeChat-CamelAIOrg-brightgreen?logo=wechat&logoColor=white" />
61
+ </a>
62
+ <a href="https://twitter.com/CamelAIOrg" target="_blank">
63
+ <img alt="Twitter Follow" src="https://img.shields.io/twitter/follow/CamelAIOrg?style=social&color=brightgreen&logo=twitter" />
64
+ </a>
65
+ </div>
66
+
67
+ # CAMEL: Communicative Agents for “Mind” Exploration of Large Scale Language Model Society
68
+
69
+ <div align="center">
70
+
71
+ <a>![Python 3.9+](https://img.shields.io/badge/Python-3.9%2B-brightgreen.svg)</a>
72
+ <a href="https://github.com/camel-ai/camel/actions/workflows/pytest_package.yml">![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/camel-ai/camel/pytest_package.yml?label=tests&logo=github)</a>
73
+ <a href="https://camel-ai.github.io/camel/">
74
+ ![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/camel-ai/camel/documentation.yaml?label=docs&logo=github)
75
+ </a>
76
+ <a href="https://github.com/camel-ai/camel/stargazers" target="_blank">
77
+ <img alt="GitHub Repo Stars" src="https://img.shields.io/github/stars/camel-ai/camel?label=stars&logo=github&color=brightgreen" />
78
+ </a>
79
+ <a href="https://github.com/camel-ai/camel/blob/master/licenses/LICENSE">![License](https://img.shields.io/github/license/camel-ai/camel?label=license&logo=data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9IjAgMCAyNCAyNCIgd2lkdGg9IjI0IiBoZWlnaHQ9IjI0IiBmaWxsPSIjZmZmZmZmIj48cGF0aCBmaWxsLXJ1bGU9ImV2ZW5vZGQiIGQ9Ik0xMi43NSAyLjc1YS43NS43NSAwIDAwLTEuNSAwVjQuNUg5LjI3NmExLjc1IDEuNzUgMCAwMC0uOTg1LjMwM0w2LjU5NiA1Ljk1N0EuMjUuMjUgMCAwMTYuNDU1IDZIMi4zNTNhLjc1Ljc1IDAgMTAwIDEuNUgzLjkzTC41NjMgMTUuMThhLjc2Mi43NjIgMCAwMC4yMS44OGMuMDguMDY0LjE2MS4xMjUuMzA5LjIyMS4xODYuMTIxLjQ1Mi4yNzguNzkyLjQzMy42OC4zMTEgMS42NjIuNjIgMi44NzYuNjJhNi45MTkgNi45MTkgMCAwMDIuODc2LS42MmMuMzQtLjE1NS42MDYtLjMxMi43OTItLjQzMy4xNS0uMDk3LjIzLS4xNTguMzEtLjIyM2EuNzUuNzUgMCAwMC4yMDktLjg3OEw1LjU2OSA3LjVoLjg4NmMuMzUxIDAgLjY5NC0uMTA2Ljk4NC0uMzAzbDEuNjk2LTEuMTU0QS4yNS4yNSAwIDAxOS4yNzUgNmgxLjk3NXYxNC41SDYuNzYzYS43NS43NSAwIDAwMCAxLjVoMTAuNDc0YS43NS43NSAwIDAwMC0xLjVIMTIuNzVWNmgxLjk3NGMuMDUgMCAuMS4wMTUuMTQuMDQzbDEuNjk3IDEuMTU0Yy4yOS4xOTcuNjMzLjMwMy45ODQuMzAzaC44ODZsLTMuMzY4IDcuNjhhLjc1Ljc1IDAgMDAuMjMuODk2Yy4wMTIuMDA5IDAgMCAuMDAyIDBhMy4xNTQgMy4xNTQgMCAwMC4zMS4yMDZjLjE4NS4xMTIuNDUuMjU2Ljc5LjRhNy4zNDMgNy4zNDMgMCAwMDIuODU1LjU2OCA3LjM0MyA3LjM0MyAwIDAwMi44NTYtLjU2OWMuMzM4LS4xNDMuNjA0LS4yODcuNzktLjM5OWEzLjUgMy41IDAgMDAuMzEtLjIwNi43NS43NSAwIDAwLjIzLS44OTZMMjAuMDcgNy41aDEuNTc4YS43NS43NSAwIDAwMC0xLjVoLTQuMTAyYS4yNS4yNSAwIDAxLS4xNC0uMDQzbC0xLjY5Ny0xLjE1NGExLjc1IDEuNzUgMCAwMC0uOTg0LS4zMDNIMTIuNzVWMi43NXpNMi4xOTMgMTUuMTk4YTUuNDE4IDUuNDE4IDAgMDAyLjU1Ny42MzUgNS40MTggNS40MTggMCAwMDIuNTU3LS42MzVMNC43NSA5LjM2OGwtMi41NTcgNS44M3ptMTQuNTEtLjAyNGMuMDgyLjA0LjE3NC4wODMuMjc1LjEyNi41My4yMjMgMS4zMDUuNDUgMi4yNzIuNDVhNS44NDYgNS44NDYgMCAwMDIuNTQ3LS41NzZMMTkuMjUgOS4zNjdsLTIuNTQ3IDUuODA3eiI+PC9wYXRoPjwvc3ZnPgo=)</a>
80
+ </div>
81
+
82
+ <p align="center">
83
+ <a href="https://github.com/camel-ai/camel#community">Community</a> |
84
+ <a href="https://github.com/camel-ai/camel#installation">Installation</a> |
85
+ <a href="https://camel-ai.github.io/camel/">Documentation</a> |
86
+ <a href="https://github.com/camel-ai/camel/tree/HEAD/examples">Examples</a> |
87
+ <a href="https://arxiv.org/abs/2303.17760">Paper</a> |
88
+ <a href="https://github.com/camel-ai/camel#citation">Citation</a> |
89
+ <a href="https://github.com/camel-ai/camel#contributing-to-camel-">Contributing</a> |
90
+ <a href="https://www.camel-ai.org/">CAMEL-AI</a>
91
+ </p>
92
+
93
+ <p align="center">
94
+ <img src='./misc/logo.png' width=800>
95
+ </p>
96
+
97
+ ## Overview
98
+ The rapid advancement of conversational and chat-based language models has led to remarkable progress in complex task-solving. However, their success heavily relies on human input to guide the conversation, which can be challenging and time-consuming. This paper explores the potential of building scalable techniques to facilitate autonomous cooperation among communicative agents and provide insight into their "cognitive" processes. To address the challenges of achieving autonomous cooperation, we propose a novel communicative agent framework named *role-playing*. Our approach involves using *inception prompting* to guide chat agents toward task completion while maintaining consistency with human intentions. We showcase how role-playing can be used to generate conversational data for studying the behaviors and capabilities of chat agents, providing a valuable resource for investigating conversational language models. Our contributions include introducing a novel communicative agent framework, offering a scalable approach for studying the cooperative behaviors and capabilities of multi-agent systems, and open-sourcing our library to support research on communicative agents and beyond. The GitHub repository of this project is made publicly available on: [https://github.com/camel-ai/camel](https://github.com/camel-ai/camel).
99
+
100
+ ## Community
101
+ 🐫 CAMEL is an open-source library designed for the study of autonomous and communicative agents. We believe that studying these agents on a large scale offers valuable insights into their behaviors, capabilities, and potential risks. To facilitate research in this field, we implement and support various types of agents, tasks, prompts, models, and simulated environments.
102
+
103
+ Join us ([*Slack*](https://join.slack.com/t/camel-kwr1314/shared_invite/zt-1vy8u9lbo-ZQmhIAyWSEfSwLCl2r2eKA), [*Discord*](https://discord.gg/CNcNpquyDc) or [*WeChat*](https://ghli.org/camel/wechat.png)) in pushing the boundaries of building AI Society.
104
+
105
+ ## Try it yourself
106
+ We provide a [![Google Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1AzP33O8rnMW__7ocWJhVBXjKziJXPtim?usp=sharing) demo showcasing a conversation between two ChatGPT agents playing the roles of a Python programmer and a stock trader collaborating on developing a trading bot for the stock market.
107
+
108
+ <p align="center">
109
+ <img src='./misc/framework.png' width=800>
110
+ </p>
111
+
112
+ ## Documentation
113
+
114
+ [CAMEL package documentation pages](https://camel-ai.github.io/camel/)
115
+
116
+ ## Installation
117
+
118
+ Install `CAMEL` from source with poetry (Recommended):
119
+ ```sh
120
+ # Clone github repo
121
+ # For the latest code:
122
+ git clone https://github.com/camel-ai/camel.git
123
+ # Or for the stable code:
124
+ git clone -b v0.1.0 https://github.com/camel-ai/camel.git
125
+
126
+ # Change directory into project directory
127
+ cd camel
128
+
129
+ # Activate camel virtual environment
130
+ poetry shell
131
+
132
+ # Install camel from source
133
+ # It takes about 90 seconds to resolve dependencies
134
+ poetry install
135
+
136
+ # Or if you want to use "huggingface agent"
137
+ poetry install -E huggingface-agent # (Optional)
138
+
139
+ # do something with camel
140
+
141
+ # Exit the virtual environment
142
+ exit
143
+ ```
144
+
145
+ Install `CAMEL` from source with conda and pip:
146
+ ```sh
147
+ # Create a conda virtual environment
148
+ conda create --name camel python=3.10
149
+
150
+ # Activate camel conda environment
151
+ conda activate camel
152
+
153
+ # Clone github repo
154
+ git clone -b v0.1.0 https://github.com/camel-ai/camel.git
155
+
156
+ # Change directory into project directory
157
+ cd camel
158
+
159
+ # Install camel from source
160
+ pip install -e .
161
+
162
+ # Or if you want to use "huggingface agent"
163
+ pip install -e .[huggingface-agent] # (Optional)
164
+ ```
165
+ ## Example
166
+ You can find a list of tasks for different sets of assistant and user role pairs [here](https://drive.google.com/file/d/194PPaSTBR07m-PzjS-Ty6KlPLdFIPQDd/view?usp=share_link).
167
+
168
+ Run the `role_playing.py` script
169
+
170
+ First, you need to add your OpenAI API key to system environment variables. The method to do this depends on your operating system and the shell you're using.
171
+
172
+ **For Bash shell (Linux, macOS, Git Bash on Windows):**
173
+
174
+ ```bash
175
+ # Export your OpenAI API key
176
+ export OPENAI_API_KEY=<insert your OpenAI API key>
177
+ export OPENAI_API_BASE_URL=<insert your OpenAI API BASE URL> # Optional: set this only if you use an OpenAI proxy service
178
+ ```
179
+
180
+ **For Windows Command Prompt:**
181
+
182
+ ```cmd
183
+ REM export your OpenAI API key
184
+ set OPENAI_API_KEY=<insert your OpenAI API key>
185
+ REM Optional: set this only if you use an OpenAI proxy service
+ set OPENAI_API_BASE_URL=<insert your OpenAI API BASE URL>
186
+ ```
187
+
188
+ **For Windows PowerShell:**
189
+
190
+ ```powershell
191
+ # Export your OpenAI API key
192
+ $env:OPENAI_API_KEY="<insert your OpenAI API key>"
193
+ $env:OPENAI_API_BASE_URL="<insert your OpenAI API BASE URL>" # Optional: set this only if you use an OpenAI proxy service
194
+
195
+ ```
196
+
197
+ Replace `<insert your OpenAI API key>` with your actual OpenAI API key in each case. Make sure there are no spaces around the `=` sign.
198
+
199
+ After setting the OpenAI API key, you can run the script:
200
+
201
+ ```bash
202
+ # You can change the role pair and initial prompt in role_playing.py
203
+ python examples/ai_society/role_playing.py
204
+ ```
205
+
206
+ Please note that the environment variable is session-specific. If you open a new terminal window or tab, you will need to set the API key again in that new session.
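To double-check that the key is visible to the current session before launching an example, a quick sanity check (illustrative, standard library only):

```python
import os

# The examples read the key from the environment, so it must be set in the
# same shell session that launches Python.
assert os.environ.get("OPENAI_API_KEY"), "OPENAI_API_KEY is not set in this session"
```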
207
+
208
+
209
+ ## Use Open-Source Models as Backends
210
+
211
+ The basic workflow for using an open-source model as the backend relies on an external server running an LLM inference service; for example, during development we chose [FastChat](https://github.com/lm-sys/FastChat) to run the service.
212
+
213
+ We do not fix the choice of server, so that CAMEL stays decoupled from any specific LLM inference server (the server needs to be deployed by the user). The deployed server must, however, **support OpenAI-compatible APIs, in particular the method `openai.ChatCompletion.create`**.
214
+
215
+ Here are instructions for enabling open-source backends; the example uses [FastChat](https://github.com/lm-sys/FastChat) and a LLaMA2-based model ([`meta-llama/Llama-2-7b-chat-hf`](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf)). Please install FastChat in advance by following its installation guide.
216
+
217
+ 1. Before running CAMEL, first launch the FastChat server following the guide at https://github.com/lm-sys/FastChat/blob/main/docs/openai_api.md. The commands summarized below should be kept running **in separate processes**:
218
+
219
+ ```sh
220
+ # Launch the controller
221
+ python -m fastchat.serve.controller
222
+
223
+ # Launch the model worker(s)
224
+ python3 -m fastchat.serve.model_worker --model-path meta-llama/Llama-2-7b-chat-hf
225
+
226
+ # Launch the RESTful API server
227
+ python3 -m fastchat.serve.openai_api_server --host localhost --port 8000
228
+ ```
229
+
230
+ 2. Once the controller reports that it is successfully receiving heartbeat signals from the worker, the server should be ready for use at http://localhost:8000/v1 (see the quick reachability check sketched after this list).
231
+
232
+ 3. Then we can try running `role_playing_with_open_source_model.py`, where each agent in this example is initialized by specifying the `model_path` and `server_url`, similar to the example code below:
233
+
234
+ ```python
235
+ system_message = # ...
236
+
237
+ agent_kwargs = dict(
238
+ model=model_type,
239
+ model_config=OpenSourceConfig(
240
+ model_path="meta-llama/Llama-2-7b-chat-hf",
241
+ server_url="http://localhost:8000/v1",
242
+ ),
243
+ )
244
+
245
+ agent = ChatAgent(
246
+ system_message,
247
+ **agent_kwargs,
248
+ )
249
+ ```
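As referenced in step 2 above, a quick way to confirm the server is answering OpenAI-compatible requests (illustrative, standard library only; assumes the default port 8000 from the commands above):

```python
import json
from urllib.request import urlopen

# List the models served by the FastChat OpenAI-compatible API server.
with urlopen("http://localhost:8000/v1/models") as resp:
    print(json.loads(resp.read()))
```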
250
+
251
+ ### Supported Models
252
+
253
+ - LLaMA2-based models
254
+ - example: [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf)
255
+ - Vicuna-based models
256
+ - example: [lmsys/vicuna-7b-v1.5](https://huggingface.co/lmsys/vicuna-7b-v1.5)
257
+
258
+ ## Data (Hosted on Hugging Face)
259
+ | Dataset | Chat format | Instruction format | Chat format (translated) |
260
+ | -- | -- | -- | -- |
261
+ | **AI Society** | [Chat format](https://huggingface.co/datasets/camel-ai/ai_society/blob/main/ai_society_chat.tar.gz) | [Instruction format](https://huggingface.co/datasets/camel-ai/ai_society/blob/main/ai_society_instructions.json) | [Chat format (translated)](https://huggingface.co/datasets/camel-ai/ai_society_translated) |
262
+ | **Code** | [Chat format](https://huggingface.co/datasets/camel-ai/code/blob/main/code_chat.tar.gz) | [Instruction format](https://huggingface.co/datasets/camel-ai/code/blob/main/code_instructions.json) | x |
263
+ | **Math** | [Chat format](https://huggingface.co/datasets/camel-ai/math) | x | x|
264
+ | **Physics** | [Chat format](https://huggingface.co/datasets/camel-ai/physics) | x | x |
265
+ | **Chemistry** | [Chat format](https://huggingface.co/datasets/camel-ai/chemistry) | x | x |
266
+ | **Biology** | [Chat format](https://huggingface.co/datasets/camel-ai/biology) | x | x |
267
+
268
+ ## Visualizations of Instructions and Tasks
269
+
270
+ | Dataset | Instructions | Tasks |
271
+ | -- | -- | -- |
272
+ | **AI Society** | [Instructions](https://atlas.nomic.ai/map/3a559a06-87d0-4476-a879-962656242452/db961915-b254-48e8-8e5c-917f827b74c6) | [Tasks](https://atlas.nomic.ai/map/cb96f41b-a6fd-4fe4-ac40-08e101714483/ae06156c-a572-46e9-8345-ebe18586d02b) |
273
+ | **Code** | [Instructions](https://atlas.nomic.ai/map/902d6ccb-0bbb-4294-83a8-1c7d2dae03c8/ace2e146-e49f-41db-a1f4-25a2c4be2457) | [Tasks](https://atlas.nomic.ai/map/efc38617-9180-490a-8630-43a05b35d22d/2576addf-a133-45d5-89a9-6b067b6652dd) |
274
+ | **Misalignment** | [Instructions](https://atlas.nomic.ai/map/5c491035-a26e-4a05-9593-82ffb2c3ab40/2bd98896-894e-4807-9ed8-a203ccb14d5e) | [Tasks](https://atlas.nomic.ai/map/abc357dd-9c04-4913-9541-63e259d7ac1f/825139a4-af66-427c-9d0e-f36b5492ab3f) |
275
+
276
+ ## Implemented Research Ideas from Other Works
277
+ We have implemented research ideas from other works so that you can build, compare, and customize your agents. If you use any of these modules, please cite the original works:
278
+ - `TaskCreationAgent`, `TaskPrioritizationAgent` and `BabyAGI` from *Nakajima et al.*: [Task-Driven Autonomous Agent](https://yoheinakajima.com/task-driven-autonomous-agent-utilizing-gpt-4-pinecone-and-langchain-for-diverse-applications/). [[Example](https://github.com/camel-ai/camel/blob/master/examples/ai_society/babyagi_playing.py)]
279
+
280
+ ## News
281
+ - Released AI Society and Code dataset (April 2, 2023)
282
+ - Initial release of `CAMEL` python library (March 21, 2023)
283
+
284
+ ## Citation
285
+ ```
286
+ @inproceedings{li2023camel,
287
+ title={CAMEL: Communicative Agents for "Mind" Exploration of Large Language Model Society},
288
+ author={Li, Guohao and Hammoud, Hasan Abed Al Kader and Itani, Hani and Khizbullin, Dmitrii and Ghanem, Bernard},
289
+ booktitle={Thirty-seventh Conference on Neural Information Processing Systems},
290
+ year={2023}
291
+ }
292
+ ```
293
+ ## Acknowledgement
294
+ Special thanks to [Nomic AI](https://home.nomic.ai/) for giving us extended access to their dataset exploration tool (Atlas).
295
+
296
+ We would also like to thank Haya Hammoud for designing the logo of our project.
297
+
298
+ ## License
299
+
300
+ CAMEL is intended and licensed solely for research use.
301
+
302
+ The source code is licensed under Apache 2.0.
303
+
304
+ The datasets are licensed under CC BY-NC 4.0, which permits only non-commercial use. Models trained on the datasets should accordingly not be used for anything other than research purposes.
305
+
306
+ ## Contributing to CAMEL 🐫
307
+ We appreciate your interest in contributing to our open-source initiative. Our [contributing guidelines](https://github.com/camel-ai/camel/blob/master/CONTRIBUTING.md) outline the steps for contributing to CAMEL. Please refer to this guide to ensure smooth collaboration and successful contributions. 🤝🚀
308
+
309
+ ## Contact
310
+ For more information, please contact camel.ai.team@gmail.com.
311
+
@@ -0,0 +1,75 @@
1
+ camel/__init__.py,sha256=npVuV2hbipntfiNEuiTNH-1n3SNZ_ZsO79FoNanuscY,991
2
+ camel/agents/__init__.py,sha256=PX0mqooxrfYsYqaGA30bMiFz7PGNvLlS_R_yV-2z_YU,1412
3
+ camel/agents/base.py,sha256=X39qWSiT1WnDqaJ9k3gQrTpOQSwUKzNEVpp5AY6fDH8,1130
4
+ camel/agents/chat_agent.py,sha256=eq0kOYCu1xWVBZMHcC_tCjZieqBD4-VDgh92YHPcNao,21214
5
+ camel/agents/critic_agent.py,sha256=hEAzHw9ZmM5SNOsY3qBz8lzaV6ki7j5YxdrBENbbvrE,7303
6
+ camel/agents/embodied_agent.py,sha256=4xIgJSp0X35vq1ckQOYOfY_G1sm27gcsfpE36oQU20E,5847
7
+ camel/agents/role_assignment_agent.py,sha256=BiAQYunjQEm6zsPRTQj5nN4SaXumIPfWyQL4m9O0qlo,4755
8
+ camel/agents/task_agent.py,sha256=trx5jc27veMxIg8eFAl4Z5nErqw6Yis08ZIWM0PIpN8,14785
9
+ camel/agents/tool_agents/__init__.py,sha256=ulTNWU2qoFGe3pvVmCq_sdfeSX3NKZ0due66TYvsL-M,862
10
+ camel/agents/tool_agents/base.py,sha256=derH3sS0seYViuLaci7OT6ACnsXJ_iPi1dxpWLtljzo,1400
11
+ camel/agents/tool_agents/hugging_face_tool_agent.py,sha256=HEs7eLcfKjRxp5w4aDyLGKZVFWY9IKYAnSGBKTKut-w,8691
12
+ camel/configs.py,sha256=SOz-N09ffOcIEUrjL_fZTIgtrbS5Q7a6pEsj7W-L4NM,7440
13
+ camel/embeddings/__init__.py,sha256=zPLJAYf6aBh4VK7GE3_f1FMLM8oxvC0Mh9UvQwjQRoA,845
14
+ camel/embeddings/base.py,sha256=asXZ_VYMf3UkhrsLFKzoPNf1XCP5W30n7nmt5XpB4Gg,2166
15
+ camel/embeddings/openai_embedding.py,sha256=3WfHlzNO77l8DPACLEknKjhZIHvGN3bV5JoTAYH4jO8,2583
16
+ camel/functions/__init__.py,sha256=CkJb-dPAF_RCzuTE7-oO7vWRhp_Sdgz11NqkjiD76FI,1061
17
+ camel/functions/base_io_functions.py,sha256=Yxdy6uenhyuXPJUq0sYNHXjz2yxuYYKKDq2CS785_Eg,8619
18
+ camel/functions/math_functions.py,sha256=f8eFp02GrFSeKUB7ypmpvx8xnxpXR81hQ33S1RKQjSQ,1712
19
+ camel/functions/openai_function.py,sha256=DXEB1yFlBmvblNebtLYNErSUyUEviOM5aHULml_5Ef0,3699
20
+ camel/functions/search_functions.py,sha256=2NycgwRqraU_hVELysMN6ooPRs7-s1IcrKmfWxUKSCg,10601
21
+ camel/functions/unstructured_io_fuctions.py,sha256=oPQknC3KqxTV1e0AZYPiNW5YYnc4etO14pLGJB8dJo8,23865
22
+ camel/functions/weather_functions.py,sha256=WDhFlRPcUFQK4eRv7fmz_2ZWFi7j0aLjodikM3AkZbg,5731
23
+ camel/generators.py,sha256=VSeF3Ub60fJU__1NF9gKY4o2zKUZ5-162SjBQHKINZ8,10132
24
+ camel/human.py,sha256=FvlGfOO7cTDGChF-IKyonf5fmD8zqxEEeeQ5SEbBsjc,4922
25
+ camel/memories/__init__.py,sha256=6C0Jpat5vXDjN9DrbbQRCEafHEf2I_DMkmDeszK3wus,1116
26
+ camel/memories/base.py,sha256=CXjjEaFvCqBjADlTKWNx_WqpZf7n-2rhMv1re87vGHE,3026
27
+ camel/memories/chat_history_memory.py,sha256=hzLjA1YrvbQMhZ2HfofJWVtJN-1lTkzvssUBAOGm3HU,4821
28
+ camel/memories/context_creators/__init__.py,sha256=SzGpby1l8zLgkpXXrysXt4Mf-ykmrAHnvp2BWzNBbuY,858
29
+ camel/memories/context_creators/base.py,sha256=_mXCA8_zsItkMNKijoZhF1HU1R2hxQeQqxvW4eF5jTI,2743
30
+ camel/memories/context_creators/score_based.py,sha256=mpGVUUumL6Q2Mx73a77t7WqB6VxwiOp7cHAfrCatGz8,5065
31
+ camel/memories/records.py,sha256=ZYxai8_YbX2wRGaqF-KjBPD4mN479a4iZIH2yiR-hig,3618
32
+ camel/messages/__init__.py,sha256=xdg847kZH0ybo8Ue0Hafdxmdevz0xg7r9ET2Tbjy9IY,1468
33
+ camel/messages/base.py,sha256=2XQBCoKFdjQ-ASgtotcBnQGcvxwaGLYr0wPuDQud1pE,7872
34
+ camel/messages/func_message.py,sha256=nus7RHB055ygSlraLFtEhDLBLxtnL1PhIbdE94YfI98,3809
35
+ camel/models/__init__.py,sha256=PXumsv1-lURUMSsDNFQTtZTWshC2LRoOs6Duwjf3bxI,1026
36
+ camel/models/base_model.py,sha256=_2GnBwhXVoxb1r32sDbAar8FBtcPK_XRmSOUl2soZCU,3703
37
+ camel/models/model_factory.py,sha256=W-KAO0MoEGyQPUNKOHs_gReAEOp4IXNETVuu8tUaa3U,2007
38
+ camel/models/open_source_model.py,sha256=NW9cF6ustrjW7hoqGCYO3xRHe9Np7f-A8eafOUbp7fk,5702
39
+ camel/models/openai_model.py,sha256=35bYVX-tknwGdxqw6RAxRv2fnmvfsx0eEEqMo5wWtU4,3902
40
+ camel/models/stub_model.py,sha256=vm79L-hYy6SI3y_Y1XLx6vojkICrRwU4s684FCjksF0,3520
41
+ camel/prompts/__init__.py,sha256=ZzZW1Tlo5ALUansPrb6kJbGLwnBU9xB7ltqb5LgM_k8,1679
42
+ camel/prompts/ai_society.py,sha256=SVMDDgUC4RMcSixFD2KImsOfW2prWv0M-9QxAd_eEug,6226
43
+ camel/prompts/base.py,sha256=uLXK7S4kCFCU3gaq45pxTzzUhHvtOwpWhuMDFXzrMio,8272
44
+ camel/prompts/code.py,sha256=5LTlIzPHMtzDHSER5VDNrpsfxjBr8tAl75nV2ARrceQ,5784
45
+ camel/prompts/evaluation.py,sha256=2PCf1g47RdkJ1dlLCFlFsacQmtqNso97QHWEq9xbypA,1556
46
+ camel/prompts/misalignment.py,sha256=_mS07PnIIVikcrmbmJRQZaCIe3npap3NemOSx2yIEFo,4478
47
+ camel/prompts/prompt_templates.py,sha256=O3XcQT_vMJ3DCOelg03OH6uotFMS1P0mMkfPJctJVjw,4074
48
+ camel/prompts/role_description_prompt_template.py,sha256=qo8alwzCYDyGjoL_DUhu06el_ukYgedBMTerfARThYY,2499
49
+ camel/prompts/solution_extraction.py,sha256=SHfJEaZ6Ugknp3KodmdMUrmxuz3C2csTw90LoxkZ38Y,2068
50
+ camel/prompts/task_prompt_template.py,sha256=zNVYJ3xHdoZWXX8jm6Qd1xcJNsbSooOD6OALLpd0n0E,2197
51
+ camel/prompts/translation.py,sha256=DCQdjPHriaDm13LsKdPqWmpjPn8S8wZCu68OgsHLbxo,1861
52
+ camel/responses/__init__.py,sha256=edtTQskOgq5obyITziRFL62HTJP9sAikAtP9vrFacEQ,795
53
+ camel/responses/agent_responses.py,sha256=0wNmd9MkIbdvZzKUey_RI2OKfAnFUytnSSBJTdKvNN4,1698
54
+ camel/societies/__init__.py,sha256=HT6Gomxg5Rt03mUMvSDbdZtB1fAEpj7I9-ZAkRj9Jco,832
55
+ camel/societies/babyagi_playing.py,sha256=Pf4_F_aQS4AcTNS9Rv9EDwGguGyWyLmceakLWZ3J4IA,11768
56
+ camel/societies/role_playing.py,sha256=_VFnuPP7wfC3u3-RsdnX34qFxhXzruzaflC-upGUkx8,21327
57
+ camel/storages/__init__.py,sha256=ykzsUd2GL33MuwsjWaaK1JwBNpOm2qfKXYwx72G81E0,973
58
+ camel/storages/key_value_storages/__init__.py,sha256=lZOFtqj1iTQ9pRB4DkCYSMyPwEwaJDVzU06kM5jxFd4,916
59
+ camel/storages/key_value_storages/base.py,sha256=YqBFEU1IFYkpleHfLvK-sLFIWj7DgWn1t4KSsQzThks,2192
60
+ camel/storages/key_value_storages/in_memory.py,sha256=DRHSf_qGCcXAGDtI7nO0GMTewxxZWKrP3BZ9CgkwZfY,1964
61
+ camel/storages/key_value_storages/json.py,sha256=vgTr4gjFzaZceOZVd-ZHw7mht87uTXZXCBaYeY2FQ9I,3482
62
+ camel/terminators/__init__.py,sha256=CMOw8F8pdfUDzTl0SeeNpyOQgM_I0hAG95eouhCN0AY,997
63
+ camel/terminators/base.py,sha256=9ZI4pzQxWw1VmfXUCT9hwW0M64YVfo_ORMLMTq6G5Jk,1397
64
+ camel/terminators/response_terminator.py,sha256=IULccJ5bGfbkn3yMWD7enHM3xwlDfCWF8CZF9MKgxWQ,4910
65
+ camel/terminators/token_limit_terminator.py,sha256=fQAxKXVA4uc2H5ekovOm8gEAHlW5CpVutXwuIIORd2A,2065
66
+ camel/types/__init__.py,sha256=qpQU0gEOeNFZacjPd-ZJn2LJCPAUzVpSwfit2fA-Tgg,1656
67
+ camel/types/enums.py,sha256=KPKb45vR6WfDYANp0UTSK7m-pBbrm5VbPvIXnzc8MvU,5460
68
+ camel/types/openai_types.py,sha256=0jdELjDh4igCWNoyOQ2F3fTfnWeI5gXwNR0PVWM3ECk,2045
69
+ camel/utils/__init__.py,sha256=0dmETCrosrdVyFRqnbrlPd3AsUW_8SqPO8ho1NXX_Us,1490
70
+ camel/utils/commons.py,sha256=lWU1_D75e2ffEHXPI1_KfS0HWR2w3FC_uDhPPkqFS1w,7105
71
+ camel/utils/python_interpreter.py,sha256=mwG5IYh7EeF7cNP8WOqOpxH3l_j4rw-SL9v-Cv0RD2Q,18989
72
+ camel/utils/token_counting.py,sha256=ItoIxeAQl1gTDjPE1q8_8v5PAvvu3O6YK2zkD66HH0M,8061
73
+ camel_ai-0.1.1.dist-info/METADATA,sha256=e9I3e5_mJ146J8LoWwKEhFnymDAE2hdfpjvpci17RWs,18492
74
+ camel_ai-0.1.1.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
75
+ camel_ai-0.1.1.dist-info/RECORD,,
@@ -0,0 +1,4 @@
1
+ Wheel-Version: 1.0
2
+ Generator: poetry-core 1.8.1
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any