camel-ai 0.1.1__py3-none-any.whl → 0.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- camel/__init__.py +1 -11
- camel/agents/__init__.py +5 -5
- camel/agents/chat_agent.py +124 -63
- camel/agents/critic_agent.py +28 -17
- camel/agents/deductive_reasoner_agent.py +235 -0
- camel/agents/embodied_agent.py +92 -40
- camel/agents/role_assignment_agent.py +27 -17
- camel/agents/task_agent.py +60 -34
- camel/agents/tool_agents/base.py +0 -1
- camel/agents/tool_agents/hugging_face_tool_agent.py +7 -4
- camel/configs.py +119 -7
- camel/embeddings/__init__.py +2 -0
- camel/embeddings/base.py +3 -2
- camel/embeddings/openai_embedding.py +3 -3
- camel/embeddings/sentence_transformers_embeddings.py +65 -0
- camel/functions/__init__.py +13 -3
- camel/functions/google_maps_function.py +335 -0
- camel/functions/math_functions.py +7 -7
- camel/functions/openai_function.py +344 -42
- camel/functions/search_functions.py +100 -35
- camel/functions/twitter_function.py +484 -0
- camel/functions/weather_functions.py +36 -23
- camel/generators.py +65 -46
- camel/human.py +17 -11
- camel/interpreters/__init__.py +25 -0
- camel/interpreters/base.py +49 -0
- camel/{utils/python_interpreter.py → interpreters/internal_python_interpreter.py} +129 -48
- camel/interpreters/interpreter_error.py +19 -0
- camel/interpreters/subprocess_interpreter.py +190 -0
- camel/loaders/__init__.py +22 -0
- camel/{functions/base_io_functions.py → loaders/base_io.py} +38 -35
- camel/{functions/unstructured_io_fuctions.py → loaders/unstructured_io.py} +199 -110
- camel/memories/__init__.py +17 -7
- camel/memories/agent_memories.py +156 -0
- camel/memories/base.py +97 -32
- camel/memories/blocks/__init__.py +21 -0
- camel/memories/{chat_history_memory.py → blocks/chat_history_block.py} +34 -34
- camel/memories/blocks/vectordb_block.py +101 -0
- camel/memories/context_creators/__init__.py +3 -2
- camel/memories/context_creators/score_based.py +32 -20
- camel/memories/records.py +6 -5
- camel/messages/__init__.py +2 -2
- camel/messages/base.py +99 -16
- camel/messages/func_message.py +7 -4
- camel/models/__init__.py +4 -2
- camel/models/anthropic_model.py +132 -0
- camel/models/base_model.py +3 -2
- camel/models/model_factory.py +10 -8
- camel/models/open_source_model.py +25 -13
- camel/models/openai_model.py +9 -10
- camel/models/stub_model.py +6 -5
- camel/prompts/__init__.py +7 -5
- camel/prompts/ai_society.py +21 -14
- camel/prompts/base.py +54 -47
- camel/prompts/code.py +22 -14
- camel/prompts/evaluation.py +8 -5
- camel/prompts/misalignment.py +26 -19
- camel/prompts/object_recognition.py +35 -0
- camel/prompts/prompt_templates.py +14 -8
- camel/prompts/role_description_prompt_template.py +16 -10
- camel/prompts/solution_extraction.py +9 -5
- camel/prompts/task_prompt_template.py +24 -21
- camel/prompts/translation.py +9 -5
- camel/responses/agent_responses.py +5 -2
- camel/retrievers/__init__.py +24 -0
- camel/retrievers/auto_retriever.py +319 -0
- camel/retrievers/base.py +64 -0
- camel/retrievers/bm25_retriever.py +149 -0
- camel/retrievers/vector_retriever.py +166 -0
- camel/societies/__init__.py +1 -1
- camel/societies/babyagi_playing.py +56 -32
- camel/societies/role_playing.py +188 -133
- camel/storages/__init__.py +18 -0
- camel/storages/graph_storages/__init__.py +23 -0
- camel/storages/graph_storages/base.py +82 -0
- camel/storages/graph_storages/graph_element.py +74 -0
- camel/storages/graph_storages/neo4j_graph.py +582 -0
- camel/storages/key_value_storages/base.py +1 -2
- camel/storages/key_value_storages/in_memory.py +1 -2
- camel/storages/key_value_storages/json.py +8 -13
- camel/storages/vectordb_storages/__init__.py +33 -0
- camel/storages/vectordb_storages/base.py +202 -0
- camel/storages/vectordb_storages/milvus.py +396 -0
- camel/storages/vectordb_storages/qdrant.py +371 -0
- camel/terminators/__init__.py +1 -1
- camel/terminators/base.py +2 -3
- camel/terminators/response_terminator.py +21 -12
- camel/terminators/token_limit_terminator.py +5 -3
- camel/types/__init__.py +12 -6
- camel/types/enums.py +86 -13
- camel/types/openai_types.py +10 -5
- camel/utils/__init__.py +18 -13
- camel/utils/commons.py +242 -81
- camel/utils/token_counting.py +135 -15
- {camel_ai-0.1.1.dist-info → camel_ai-0.1.3.dist-info}/METADATA +116 -74
- camel_ai-0.1.3.dist-info/RECORD +101 -0
- {camel_ai-0.1.1.dist-info → camel_ai-0.1.3.dist-info}/WHEEL +1 -1
- camel/memories/context_creators/base.py +0 -72
- camel_ai-0.1.1.dist-info/RECORD +0 -75
camel/utils/token_counting.py
CHANGED
@@ -11,11 +11,29 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+
+from __future__ import annotations
+
+import base64
 from abc import ABC, abstractmethod
-from
+from io import BytesIO
+from math import ceil
+from typing import TYPE_CHECKING, List, Optional
+
+from anthropic import Anthropic
+from PIL import Image
 
-from camel.
-
+from camel.types import ModelType, OpenAIImageDetailType, OpenAIImageType
+
+if TYPE_CHECKING:
+    from camel.messages import OpenAIMessage
+
+LOW_DETAIL_TOKENS = 85
+FIT_SQUARE_PIXELS = 2048
+SHORTEST_SIDE_PIXELS = 768
+SQUARE_PIXELS = 512
+SQUARE_TOKENS = 170
+EXTRA_TOKENS = 85
 
 
 def messages_to_prompt(messages: List[OpenAIMessage], model: ModelType) -> str:
@@ -45,8 +63,10 @@ def messages_to_prompt(messages: List[OpenAIMessage], model: ModelType) -> str:
             content = msg["content"]
             if content:
                 if not isinstance(content, str):
-                    raise ValueError(
-
+                    raise ValueError(
+                        "Currently multimodal context is not "
+                        "supported by the token counter."
+                    )
                 if i == 0:
                     ret += system_prompt + content
                 else:
@@ -64,8 +84,10 @@ def messages_to_prompt(messages: List[OpenAIMessage], model: ModelType) -> str:
             role = role_map[msg["role"]]
             content = msg["content"]
             if not isinstance(content, str):
-                raise ValueError(
-
+                raise ValueError(
+                    "Currently multimodal context is not "
+                    "supported by the token counter."
+                )
             if content:
                 ret += role + ": " + content + seps[i % 2]
             else:
@@ -85,6 +107,7 @@ def get_model_encoding(value_for_tiktoken: str):
         tiktoken.Encoding: Model encoding.
     """
     import tiktoken
+
    try:
        encoding = tiktoken.encoding_for_model(value_for_tiktoken)
    except KeyError:
@@ -111,7 +134,6 @@ class BaseTokenCounter(ABC):
 
 
 class OpenSourceTokenCounter(BaseTokenCounter):
-
     def __init__(self, model_type: ModelType, model_path: str):
         r"""Constructor for the token counter for open-source models.
 
@@ -126,6 +148,7 @@ class OpenSourceTokenCounter(BaseTokenCounter):
         # If a fast tokenizer is not available for a given model,
         # a normal Python-based tokenizer is returned instead.
         from transformers import AutoTokenizer
+
         try:
             tokenizer = AutoTokenizer.from_pretrained(
                 model_path,
@@ -136,10 +159,11 @@ class OpenSourceTokenCounter(BaseTokenCounter):
                 model_path,
                 use_fast=False,
             )
-        except:
+        except Exception:
             raise ValueError(
                 f"Invalid `model_path` ({model_path}) is provided. "
-                "Tokenizer loading failed."
+                "Tokenizer loading failed."
+            )
 
         self.tokenizer = tokenizer
         self.model_type = model_type
@@ -162,13 +186,11 @@ class OpenSourceTokenCounter(BaseTokenCounter):
 
 
 class OpenAITokenCounter(BaseTokenCounter):
-
     def __init__(self, model: ModelType):
         r"""Constructor for the token counter for OpenAI models.
 
         Args:
-
-                counted.
+            model (ModelType): Model type for which tokens will be counted.
         """
         self.model: str = model.value_for_tiktoken
 
@@ -192,7 +214,8 @@ class OpenAITokenCounter(BaseTokenCounter):
             "for information on how messages are converted to tokens. "
             "See https://platform.openai.com/docs/models/gpt-4"
             "or https://platform.openai.com/docs/models/gpt-3-5"
-            "for information about openai chat models."
+            "for information about openai chat models."
+        )
 
         self.encoding = get_model_encoding(self.model)
 
@@ -211,10 +234,107 @@ class OpenAITokenCounter(BaseTokenCounter):
         for message in messages:
             num_tokens += self.tokens_per_message
             for key, value in message.items():
-
+                if not isinstance(value, list):
+                    num_tokens += len(self.encoding.encode(str(value)))
+                else:
+                    for item in value:
+                        if item["type"] == "text":
+                            num_tokens += len(
+                                self.encoding.encode(str(item["text"]))
+                            )
+                        elif item["type"] == "image_url":
+                            image_str: str = item["image_url"]["url"]
+                            detail = item["image_url"]["detail"]
+                            image_prefix_format = "data:image/{};base64,"
+                            image_prefix: Optional[str] = None
+                            for image_type in list(OpenAIImageType):
+                                # Find the correct image format
+                                image_prefix = image_prefix_format.format(
+                                    image_type.value
+                                )
+                                if image_prefix in image_str:
+                                    break
+                            assert isinstance(image_prefix, str)
+                            encoded_image = image_str.split(image_prefix)[1]
+                            image_bytes = BytesIO(
+                                base64.b64decode(encoded_image)
+                            )
+                            image = Image.open(image_bytes)
+                            num_tokens += count_tokens_from_image(
+                                image, OpenAIImageDetailType(detail)
+                            )
                 if key == "name":
                     num_tokens += self.tokens_per_name
 
         # every reply is primed with <|start|>assistant<|message|>
         num_tokens += 3
         return num_tokens
+
+
+class AnthropicTokenCounter(BaseTokenCounter):
+    def __init__(self, model_type: ModelType):
+        r"""Constructor for the token counter for Anthropic models.
+
+        Args:
+            model_type (ModelType): Model type for which tokens will be
+                counted.
+        """
+        self.model_type = model_type
+        self.client = Anthropic()
+        self.tokenizer = self.client.get_tokenizer()
+
+    def count_tokens_from_messages(self, messages: List[OpenAIMessage]) -> int:
+        r"""Count number of tokens in the provided message list using
+        loaded tokenizer specific for this type of model.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            int: Number of tokens in the messages.
+        """
+        prompt = messages_to_prompt(messages, self.model_type)
+
+        return self.client.count_tokens(prompt)
+
+
+def count_tokens_from_image(
+    image: Image.Image, detail: OpenAIImageDetailType
+) -> int:
+    r"""Count image tokens for OpenAI vision model. An :obj:`"auto"`
+    resolution model will be treated as :obj:`"high"`. All images with
+    :obj:`"low"` detail cost 85 tokens each. Images with :obj:`"high"` detail
+    are first scaled to fit within a 2048 x 2048 square, maintaining their
+    aspect ratio. Then, they are scaled such that the shortest side of the
+    image is 768px long. Finally, we count how many 512px squares the image
+    consists of. Each of those squares costs 170 tokens. Another 85 tokens are
+    always added to the final total. For more details please refer to `OpenAI
+    vision docs <https://platform.openai.com/docs/guides/vision>`_
+
+    Args:
+        image (PIL.Image.Image): Image to count number of tokens.
+        detail (OpenAIImageDetailType): Image detail type to count
+            number of tokens.
+
+    Returns:
+        int: Number of tokens for the image given a detail type.
+    """
+    if detail == OpenAIImageDetailType.LOW:
+        return LOW_DETAIL_TOKENS
+
+    width, height = image.size
+    if width > FIT_SQUARE_PIXELS or height > FIT_SQUARE_PIXELS:
+        scaling_factor = max(width, height) / FIT_SQUARE_PIXELS
+        width = int(width / scaling_factor)
+        height = int(height / scaling_factor)
+
+    scaling_factor = min(width, height) / SHORTEST_SIDE_PIXELS
+    scaled_width = int(width / scaling_factor)
+    scaled_height = int(height / scaling_factor)
+
+    h = ceil(scaled_height / SQUARE_PIXELS)
+    w = ceil(scaled_width / SQUARE_PIXELS)
+    total = EXTRA_TOKENS + SQUARE_TOKENS * h * w
+    return total
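The largest addition in this file is the vision token accounting. The arithmetic in the new `count_tokens_from_image` helper follows OpenAI's published rules for image inputs and can be sanity-checked standalone. Here is a minimal sketch of the high-detail branch, reusing the constants from the diff (the function name `high_detail_image_tokens` is illustrative, not part of the package):

```python
from math import ceil

# Constants as introduced in the diff above.
FIT_SQUARE_PIXELS = 2048
SHORTEST_SIDE_PIXELS = 768
SQUARE_PIXELS = 512
SQUARE_TOKENS = 170
EXTRA_TOKENS = 85


def high_detail_image_tokens(width: int, height: int) -> int:
    # 1. Scale the image to fit inside a 2048 x 2048 square, keeping aspect ratio.
    if width > FIT_SQUARE_PIXELS or height > FIT_SQUARE_PIXELS:
        factor = max(width, height) / FIT_SQUARE_PIXELS
        width, height = int(width / factor), int(height / factor)
    # 2. Scale so that the shortest side is 768px long.
    factor = min(width, height) / SHORTEST_SIDE_PIXELS
    width, height = int(width / factor), int(height / factor)
    # 3. Count 512px tiles: 170 tokens per tile, plus a flat 85 tokens.
    tiles = ceil(width / SQUARE_PIXELS) * ceil(height / SQUARE_PIXELS)
    return EXTRA_TOKENS + SQUARE_TOKENS * tiles


# A 1024 x 2048 image scales to 768 x 1536, i.e. 2 x 3 tiles:
# 85 + 170 * 6 = 1105 tokens (the worked example in OpenAI's vision docs).
assert high_detail_image_tokens(1024, 2048) == 1105
```

Low-detail images skip all of this and cost a flat 85 tokens, which is why the new helper short-circuits on `OpenAIImageDetailType.LOW`.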
{camel_ai-0.1.1.dist-info → camel_ai-0.1.3.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: camel-ai
-Version: 0.1.1
+Version: 0.1.3
 Summary: Communicative Agents for AI Society Study
 Home-page: https://www.camel-ai.org/
 License: Apache-2.0
@@ -13,74 +13,71 @@ Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Provides-Extra: all
+Provides-Extra: encoders
+Provides-Extra: graph-storages
 Provides-Extra: huggingface-agent
+Provides-Extra: retrievers
 Provides-Extra: test
 Provides-Extra: tools
+Provides-Extra: vector-databases
 Requires-Dist: PyMuPDF (>=1.22.5,<2.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: accelerate (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
-Requires-Dist:
+Requires-Dist: anthropic (>=0.21.3,<0.22.0)
 Requires-Dist: beautifulsoup4 (>=4,<5) ; extra == "tools" or extra == "all"
 Requires-Dist: colorama (>=0,<1)
 Requires-Dist: datasets (>=2,<3) ; extra == "huggingface-agent" or extra == "all"
 Requires-Dist: diffusers (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
+Requires-Dist: docstring-parser (>=0.15,<0.16)
 Requires-Dist: docx2txt (>=0.8,<0.9) ; extra == "tools" or extra == "all"
+Requires-Dist: googlemaps (>=4.10.0,<5.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: jsonschema (>=4,<5)
 Requires-Dist: mock (>=5,<6) ; extra == "test"
+Requires-Dist: neo4j (>=5.18.0,<6.0.0) ; extra == "graph-storages" or extra == "all"
 Requires-Dist: numpy (>=1,<2)
 Requires-Dist: openai (>=1.2.3,<2.0.0)
 Requires-Dist: opencv-python (>=4,<5) ; extra == "huggingface-agent" or extra == "all"
+Requires-Dist: pathlib (>=1.0.1,<2.0.0)
 Requires-Dist: protobuf (>=4,<5)
+Requires-Dist: pydantic (>=1.9,<3)
+Requires-Dist: pymilvus (>=2.4.0,<3.0.0) ; extra == "vector-databases" or extra == "all"
 Requires-Dist: pyowm (>=3.3.0,<4.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: pytest (>=7,<8) ; extra == "test"
+Requires-Dist: qdrant-client (>=1.9.0,<2.0.0) ; extra == "vector-databases" or extra == "all"
+Requires-Dist: rank-bm25 (>=0.2.2,<0.3.0) ; extra == "retrievers" or extra == "all"
+Requires-Dist: requests_oauthlib (>=1.3.1,<2.0.0) ; extra == "tools" or extra == "all"
+Requires-Dist: sentence-transformers (>=2.2.2,<3.0.0) ; extra == "encoders" or extra == "all"
 Requires-Dist: sentencepiece (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
 Requires-Dist: soundfile (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
 Requires-Dist: tiktoken (>=0,<1)
 Requires-Dist: torch (>=1,<2) ; extra == "huggingface-agent" or extra == "all"
 Requires-Dist: transformers (>=4,<5) ; extra == "huggingface-agent" or extra == "all"
-Requires-Dist: unstructured (>=0.10.30,<0.11.0) ; extra == "tools" or extra == "all"
+Requires-Dist: unstructured[all-docs] (>=0.10.30,<0.11.0) ; extra == "tools" or extra == "all"
 Requires-Dist: wikipedia (>=1,<2) ; extra == "tools" or extra == "all"
+Requires-Dist: wolframalpha (>=5.0.0,<6.0.0) ; extra == "tools" or extra == "all"
 Project-URL: Documentation, https://docs.camel-ai.org
 Project-URL: Repository, https://github.com/camel-ai/camel
 Description-Content-Type: text/markdown
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-<img alt="Twitter Follow" src="https://img.shields.io/twitter/follow/CamelAIOrg?style=social&color=brightgreen&logo=twitter" />
-</a>
-</div>
-
-# CAMEL: Communicative Agents for “Mind” Exploration of Large Scale Language Model Society
-
-<div align="center">
-
-<a></a>
-<a href="https://github.com/camel-ai/camel/actions/workflows/pytest_package.yml"></a>
-<a href="https://camel-ai.github.io/camel/">
-![Documentation](https://img.shields.io/badge/Documentation-grey.svg?logo=github)
-</a>
-<a href="https://github.com/camel-ai/camel/stargazers" target="_blank">
-<img alt="GitHub Repo Stars" src="https://img.shields.io/github/stars/camel-ai/camel?label=stars&logo=github&color=brightgreen" />
-</a>
-<a href="https://github.com/camel-ai/camel/blob/master/licenses/LICENSE"></a>
-</div>
+[![Colab][colab-image]][colab-url]
+[![Hugging Face][huggingface-image]][huggingface-url]
+[![Slack][slack-image]][slack-url]
+[![Discord][discord-image]][discord-url]
+[![Wechat][wechat-image]][wechat-url]
+[![Twitter][twitter-image]][twitter-url]
+
+______________________________________________________________________
+
+# CAMEL: Communicative Agents for “Mind” Exploration of Large Language Model Society
+
+[![Python Version][python-image]][python-url]
+[![PyTest Status][pytest-image]][pytest-url]
+[![Documentation][docs-image]][docs-url]
+[![Star][star-image]][star-url]
+[![Package License][package-license-image]][package-license-url]
+[![Data License][data-license-image]][data-license-url]
 
 <p align="center">
-
+<a href="https://github.com/camel-ai/camel#community">Community</a> |
 <a href="https://github.com/camel-ai/camel#installation">Installation</a> |
 <a href="https://camel-ai.github.io/camel/">Documentation</a> |
 <a href="https://github.com/camel-ai/camel/tree/HEAD/examples">Examples</a> |
@@ -91,7 +88,7 @@ Description-Content-Type: text/markdown
 </p>
 
 <p align="center">
-<img src='
+<img src='https://raw.githubusercontent.com/camel-ai/camel/master/misc/primary_logo.png' width=800>
 </p>
 
 ## Overview
@@ -100,28 +97,46 @@ The rapid advancement of conversational and chat-based language models has led t
 ## Community
 🐫 CAMEL is an open-source library designed for the study of autonomous and communicative agents. We believe that studying these agents on a large scale offers valuable insights into their behaviors, capabilities, and potential risks. To facilitate research in this field, we implement and support various types of agents, tasks, prompts, models, and simulated environments.
 
-Join us ([*Slack*](https://join.slack.com/t/camel-
+Join us ([*Slack*](https://join.slack.com/t/camel-ai/shared_invite/zt-2g7xc41gy-_7rcrNNAArIP6sLQqldkqQ), [*Discord*](https://discord.gg/CNcNpquyDc) or [*WeChat*](https://ghli.org/camel/wechat.png)) in pushing the boundaries of building AI Society.
 
 ## Try it yourself
 We provide a [](https://colab.research.google.com/drive/1AzP33O8rnMW__7ocWJhVBXjKziJXPtim?usp=sharing) demo showcasing a conversation between two ChatGPT agents playing roles as a python programmer and a stock trader collaborating on developing a trading bot for stock market.
 
 <p align="center">
-<img src='
+<img src='https://raw.githubusercontent.com/camel-ai/camel/master/misc/framework.png' width=800>
 </p>
 
-##
+## Installation
 
-
+### From PyPI
 
-
+To install the base CAMEL library:
+```bash
+pip install camel-ai
+```
+Some features require extra dependencies:
+- To use the HuggingFace agents:
+```bash
+pip install 'camel-ai[huggingface-agent]'
+```
+- To enable RAG or use agent memory:
+```bash
+pip install 'camel-ai[tools]'
+```
+- To install with all dependencies:
+```bash
+pip install 'camel-ai[all]'
+```
+
+### From Source
 
 Install `CAMEL` from source with poetry (Recommended):
 ```sh
+# Make sure your python version is later than 3.9
+# You can use pyenv to manage multiple python versions in your system
+
 # Clone github repo
-# For the latest code:
 git clone https://github.com/camel-ai/camel.git
-# Or for the stable code:
-git clone -b v0.1.0 https://github.com/camel-ai/camel.git
 
 # Change directory into project directory
 cd camel
@@ -133,10 +148,8 @@ poetry shell
 # It takes about 90 seconds to resolve dependencies
 poetry install
 
-# Or if you want to use
-poetry install -E
-
-# do something with camel
+# Or if you want to use all other extra packages
+poetry install -E all # (Optional)
 
 # Exit the virtual environment
 exit
@@ -151,7 +164,7 @@ conda create --name camel python=3.10
 conda activate camel
 
 # Clone github repo
-git clone -b v0.1.
+git clone -b v0.1.3 https://github.com/camel-ai/camel.git
 
 # Change directory into project directory
 cd camel
@@ -159,13 +172,19 @@ cd camel
 # Install camel from source
 pip install -e .
 
-# Or if you want to use
-pip install -e .[
+# Or if you want to use all other extra packages
+pip install -e .[all] # (Optional)
 ```
+
+## Documentation
+
+[CAMEL package documentation pages](https://camel-ai.github.io/camel/).
+
 ## Example
-You can find a list of tasks for different set of assistant and user role pairs [here](https://drive.google.com/file/d/194PPaSTBR07m-PzjS-Ty6KlPLdFIPQDd/view?usp=share_link)
 
-
+You can find a list of tasks for different sets of assistant and user role pairs [here](https://drive.google.com/file/d/194PPaSTBR07m-PzjS-Ty6KlPLdFIPQDd/view?usp=share_link).
+
+As an example, to run the `role_playing.py` script:
 
 First, you need to add your OpenAI API key to system environment variables. The method to do this depends on your operating system and the shell you're using.
 
@@ -191,7 +210,6 @@ set OPENAI_API_BASE_URL=<insert your OpenAI API BASE URL> #(Should you utilize a
 # Export your OpenAI API key
 $env:OPENAI_API_KEY="<insert your OpenAI API key>"
 $env:OPENAI_API_BASE_URL="<insert your OpenAI API BASE URL>" #(Should you utilize an OpenAI proxy service, kindly specify this)
-
 ```
 
 Replace `<insert your OpenAI API key>` with your actual OpenAI API key in each case. Make sure there are no spaces around the `=` sign.
@@ -208,7 +226,7 @@ Please note that the environment variable is session-specific. If you open a new
 
 ## Use Open-Source Models as Backends
 
-The basic workflow of using an open-sourced model as the backend is based on an external server running LLM inference service, e.g. during the development we chose [FastChat](https://github.com/lm-sys/FastChat) to run the service.
+The basic workflow of using an open-sourced model as the backend is based on an external server running LLM inference service, e.g. during the development we chose [FastChat](https://github.com/lm-sys/FastChat) to run the service.
 
 We do not fix the choice of server to decouple the implementation of any specific LLM inference server with CAMEL (indicating the server needs to be deployed by the user). But the server to be deployed must satisfy that **it supports OpenAI-compatible APIs, especially the method `openai.ChatCompletion.create`**.
 
@@ -227,7 +245,7 @@ python3 -m fastchat.serve.model_worker --model-path meta-llama/Llama-2-7b-chat-h
 python3 -m fastchat.serve.openai_api_server --host localhost --port 8000
 ```
 
-2. After observing the controller successfully receiving the heartbeat signal from the worker, the server should be ready for use at http://localhost:8000/v1.
+2. After observing the controller successfully receiving the heartbeat signal from the worker, the server should be ready for use at http://localhost:8000/v1.
 
 3. Then we can try running `role_playing_with_open_source_model.py`, where each agent in this example is initialized by specifying the `model_path` and `server_url`, similar to the example code below:
 
@@ -256,21 +274,21 @@ agent = ChatAgent(
 - example: [lmsys/vicuna-7b-v1.5](https://huggingface.co/lmsys/vicuna-7b-v1.5)
 
 ## Data (Hosted on Hugging Face)
-| Dataset
-
+| Dataset        | Chat format | Instruction format | Chat format (translated) |
+|----------------|-------------|--------------------|--------------------------|
 | **AI Society** | [Chat format](https://huggingface.co/datasets/camel-ai/ai_society/blob/main/ai_society_chat.tar.gz) | [Instruction format](https://huggingface.co/datasets/camel-ai/ai_society/blob/main/ai_society_instructions.json) | [Chat format (translated)](https://huggingface.co/datasets/camel-ai/ai_society_translated) |
-| **Code**
-| **Math**
-| **Physics**
-| **Chemistry**
-| **Biology**
+| **Code**       | [Chat format](https://huggingface.co/datasets/camel-ai/code/blob/main/code_chat.tar.gz) | [Instruction format](https://huggingface.co/datasets/camel-ai/code/blob/main/code_instructions.json) | x |
+| **Math**       | [Chat format](https://huggingface.co/datasets/camel-ai/math) | x | x |
+| **Physics**    | [Chat format](https://huggingface.co/datasets/camel-ai/physics) | x | x |
+| **Chemistry**  | [Chat format](https://huggingface.co/datasets/camel-ai/chemistry) | x | x |
+| **Biology**    | [Chat format](https://huggingface.co/datasets/camel-ai/biology) | x | x |
 
 ## Visualizations of Instructions and Tasks
 
-| Dataset
-
-| **AI Society**
-| **Code**
+| Dataset          | Instructions | Tasks |
+|------------------|--------------|-------|
+| **AI Society**   | [Instructions](https://atlas.nomic.ai/map/3a559a06-87d0-4476-a879-962656242452/db961915-b254-48e8-8e5c-917f827b74c6) | [Tasks](https://atlas.nomic.ai/map/cb96f41b-a6fd-4fe4-ac40-08e101714483/ae06156c-a572-46e9-8345-ebe18586d02b) |
+| **Code**         | [Instructions](https://atlas.nomic.ai/map/902d6ccb-0bbb-4294-83a8-1c7d2dae03c8/ace2e146-e49f-41db-a1f4-25a2c4be2457) | [Tasks](https://atlas.nomic.ai/map/efc38617-9180-490a-8630-43a05b35d22d/2576addf-a133-45d5-89a9-6b067b6652dd) |
 | **Misalignment** | [Instructions](https://atlas.nomic.ai/map/5c491035-a26e-4a05-9593-82ffb2c3ab40/2bd98896-894e-4807-9ed8-a203ccb14d5e) | [Tasks](https://atlas.nomic.ai/map/abc357dd-9c04-4913-9541-63e259d7ac1f/825139a4-af66-427c-9d0e-f36b5492ab3f) |
 
 ## Implemented Research Ideas from Other Works
@@ -293,12 +311,10 @@ We implemented amazing research ideas from other works for you to build, compare
 ## Acknowledgement
 Special thanks to [Nomic AI](https://home.nomic.ai/) for giving us extended access to their data set exploration tool (Atlas).
 
-We would also like to thank Haya Hammoud for designing the logo of our project.
+We would also like to thank Haya Hammoud for designing the initial logo of our project.
 
 ## License
 
-The intended purpose and licensing of CAMEL is solely for research use.
-
 The source code is licensed under Apache 2.0.
 
 The datasets are licensed under CC BY NC 4.0, which permits only non-commercial usage. It is advised that any models trained using the dataset should not be utilized for anything other than research purposes.
@@ -309,3 +325,29 @@ We appreciate your interest in contributing to our open-source initiative. We pr
 ## Contact
 For more information please contact camel.ai.team@gmail.com.
 
+[python-image]: https://img.shields.io/badge/Python-3.9%2B-brightgreen.svg
+[python-url]: https://docs.python.org/3.9/
+[pytest-image]: https://github.com/camel-ai/camel/actions/workflows/pytest_package.yml/badge.svg
+[pytest-url]: https://github.com/camel-ai/camel/actions/workflows/pytest_package.yml
+[docs-image]: https://img.shields.io/badge/Documentation-grey.svg?logo=github
+[docs-url]: https://camel-ai.github.io/camel/index.html
+[star-image]: https://img.shields.io/github/stars/camel-ai/camel?label=stars&logo=github&color=brightgreen
+[star-url]: https://github.com/camel-ai/camel/stargazers
+[package-license-image]: https://img.shields.io/badge/License-Apache_2.0-blue.svg
+[package-license-url]: https://github.com/camel-ai/camel/blob/master/licenses/LICENSE
+[data-license-image]: https://img.shields.io/badge/License-CC_BY--NC_4.0-lightgrey.svg
+[data-license-url]: https://github.com/camel-ai/camel/blob/master/licenses/DATA_LICENSE
+
+[colab-url]: https://colab.research.google.com/drive/1AzP33O8rnMW__7ocWJhVBXjKziJXPtim?usp=sharing
+[colab-image]: https://colab.research.google.com/assets/colab-badge.svg
+[huggingface-url]: https://huggingface.co/camel-ai
+[huggingface-image]: https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-CAMEL--AI-ffc107?color=ffc107&logoColor=white
+[slack-url]: https://join.slack.com/t/camel-ai/shared_invite/zt-2g7xc41gy-_7rcrNNAArIP6sLQqldkqQ
+[slack-image]: https://img.shields.io/badge/Slack-CAMEL--AI-blueviolet?logo=slack
+[discord-url]: https://discord.gg/CNcNpquyDc
+[discord-image]: https://img.shields.io/badge/Discord-CAMEL--AI-7289da?logo=discord&logoColor=white&color=7289da
+[wechat-url]: https://ghli.org/camel/wechat.png
+[wechat-image]: https://img.shields.io/badge/WeChat-CamelAIOrg-brightgreen?logo=wechat&logoColor=white
+[twitter-url]: https://twitter.com/CamelAIOrg
+[twitter-image]: https://img.shields.io/twitter/follow/CamelAIOrg?style=social&color=brightgreen&logo=twitter
+
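One practical note on the README's "Use Open-Source Models as Backends" section above: the only hard requirement it places on the inference server is an OpenAI-compatible API. Below is a hedged sketch of checking a local FastChat endpoint before pointing CAMEL agents at it; the model name `vicuna-7b-v1.5` and the legacy (pre-1.0) `openai` module-level interface, matching the `openai.ChatCompletion.create` method the README names, are assumptions rather than anything pinned by the package:

```python
# Sketch only: assumes the FastChat openai_api_server from the README is
# running on localhost:8000 and that the worker registered the model under
# the name "vicuna-7b-v1.5". Uses the legacy (openai<1.0) module-level
# interface, i.e. the `openai.ChatCompletion.create` method the README says
# the server must support.
import openai

openai.api_base = "http://localhost:8000/v1"
openai.api_key = "EMPTY"  # FastChat does not validate the key

response = openai.ChatCompletion.create(
    model="vicuna-7b-v1.5",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(response["choices"][0]["message"]["content"])
```

If this round-trips, the same `server_url` (`http://localhost:8000/v1`) is what `role_playing_with_open_source_model.py` expects each agent to be configured with.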