camel-ai 0.2.3__py3-none-any.whl → 0.2.3a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai might be problematic.

Files changed (87)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +69 -93
  3. camel/agents/knowledge_graph_agent.py +6 -4
  4. camel/bots/__init__.py +2 -16
  5. camel/bots/discord_bot.py +206 -0
  6. camel/configs/__init__.py +2 -1
  7. camel/configs/anthropic_config.py +5 -2
  8. camel/configs/base_config.py +6 -6
  9. camel/configs/groq_config.py +3 -2
  10. camel/configs/ollama_config.py +2 -1
  11. camel/configs/openai_config.py +23 -2
  12. camel/configs/samba_config.py +2 -2
  13. camel/configs/togetherai_config.py +1 -1
  14. camel/configs/vllm_config.py +1 -1
  15. camel/configs/zhipuai_config.py +3 -2
  16. camel/embeddings/openai_embedding.py +2 -2
  17. camel/loaders/__init__.py +0 -2
  18. camel/loaders/firecrawl_reader.py +3 -3
  19. camel/loaders/unstructured_io.py +33 -35
  20. camel/messages/__init__.py +0 -1
  21. camel/models/__init__.py +4 -2
  22. camel/models/anthropic_model.py +26 -32
  23. camel/models/azure_openai_model.py +36 -39
  24. camel/models/base_model.py +20 -31
  25. camel/models/gemini_model.py +29 -37
  26. camel/models/groq_model.py +23 -29
  27. camel/models/litellm_model.py +61 -44
  28. camel/models/mistral_model.py +29 -32
  29. camel/models/model_factory.py +76 -66
  30. camel/models/nemotron_model.py +23 -33
  31. camel/models/ollama_model.py +47 -42
  32. camel/models/open_source_model.py +170 -0
  33. camel/models/{openai_compatible_model.py → openai_compatibility_model.py} +49 -31
  34. camel/models/openai_model.py +29 -48
  35. camel/models/reka_model.py +28 -30
  36. camel/models/samba_model.py +177 -82
  37. camel/models/stub_model.py +2 -2
  38. camel/models/togetherai_model.py +43 -37
  39. camel/models/vllm_model.py +50 -43
  40. camel/models/zhipuai_model.py +27 -33
  41. camel/retrievers/auto_retriever.py +10 -28
  42. camel/retrievers/vector_retriever.py +47 -58
  43. camel/societies/babyagi_playing.py +3 -6
  44. camel/societies/role_playing.py +3 -5
  45. camel/storages/graph_storages/graph_element.py +5 -3
  46. camel/storages/key_value_storages/json.py +1 -6
  47. camel/toolkits/__init__.py +7 -20
  48. camel/toolkits/base.py +3 -2
  49. camel/toolkits/code_execution.py +7 -6
  50. camel/toolkits/dalle_toolkit.py +6 -6
  51. camel/toolkits/github_toolkit.py +10 -9
  52. camel/toolkits/google_maps_toolkit.py +7 -7
  53. camel/toolkits/linkedin_toolkit.py +7 -7
  54. camel/toolkits/math_toolkit.py +8 -8
  55. camel/toolkits/open_api_toolkit.py +5 -5
  56. camel/toolkits/{function_tool.py → openai_function.py} +11 -34
  57. camel/toolkits/reddit_toolkit.py +7 -7
  58. camel/toolkits/retrieval_toolkit.py +5 -5
  59. camel/toolkits/search_toolkit.py +9 -9
  60. camel/toolkits/slack_toolkit.py +11 -11
  61. camel/toolkits/twitter_toolkit.py +452 -378
  62. camel/toolkits/weather_toolkit.py +6 -6
  63. camel/types/__init__.py +1 -6
  64. camel/types/enums.py +85 -40
  65. camel/types/openai_types.py +0 -3
  66. camel/utils/__init__.py +2 -0
  67. camel/utils/async_func.py +7 -7
  68. camel/utils/commons.py +3 -32
  69. camel/utils/token_counting.py +212 -30
  70. camel/workforce/role_playing_worker.py +1 -1
  71. camel/workforce/single_agent_worker.py +1 -1
  72. camel/workforce/task_channel.py +3 -4
  73. camel/workforce/workforce.py +4 -4
  74. {camel_ai-0.2.3.dist-info → camel_ai-0.2.3a0.dist-info}/METADATA +56 -27
  75. {camel_ai-0.2.3.dist-info → camel_ai-0.2.3a0.dist-info}/RECORD +76 -85
  76. {camel_ai-0.2.3.dist-info → camel_ai-0.2.3a0.dist-info}/WHEEL +1 -1
  77. camel/bots/discord_app.py +0 -138
  78. camel/bots/slack/__init__.py +0 -30
  79. camel/bots/slack/models.py +0 -158
  80. camel/bots/slack/slack_app.py +0 -255
  81. camel/loaders/chunkr_reader.py +0 -163
  82. camel/toolkits/arxiv_toolkit.py +0 -155
  83. camel/toolkits/ask_news_toolkit.py +0 -653
  84. camel/toolkits/google_scholar_toolkit.py +0 -146
  85. camel/toolkits/whatsapp_toolkit.py +0 -177
  86. camel/types/unified_model_type.py +0 -104
  87. camel_ai-0.2.3.dist-info/LICENSE +0 -201
camel/utils/token_counting.py
@@ -20,15 +20,10 @@ from io import BytesIO
  from math import ceil
  from typing import TYPE_CHECKING, List, Optional

+ from anthropic import Anthropic
  from PIL import Image

- from camel.types import (
-     ModelType,
-     OpenAIImageType,
-     OpenAIVisionDetailType,
-     UnifiedModelType,
- )
- from camel.utils import dependencies_required
+ from camel.types import ModelType, OpenAIImageType, OpenAIVisionDetailType


  if TYPE_CHECKING:
      from mistral_common.protocol.instruct.request import (  # type:ignore[import-not-found]
@@ -45,6 +40,145 @@ SQUARE_TOKENS = 170
  EXTRA_TOKENS = 85


+ def messages_to_prompt(messages: List[OpenAIMessage], model: ModelType) -> str:
+     r"""Parse the message list into a single prompt following model-specific
+     formats.
+
+     Args:
+         messages (List[OpenAIMessage]): Message list with the chat history
+             in OpenAI API format.
+         model (ModelType): Model type for which messages will be parsed.
+
+     Returns:
+         str: A single prompt summarizing all the messages.
+     """
+     system_message = messages[0]["content"]
+
+     ret: str
+     if model in [
+         ModelType.LLAMA_2,
+         ModelType.LLAMA_3,
+         ModelType.GROQ_LLAMA_3_8B,
+         ModelType.GROQ_LLAMA_3_70B,
+     ]:
+         # reference: https://github.com/facebookresearch/llama/blob/cfc3fc8c1968d390eb830e65c63865e980873a06/llama/generation.py#L212
+         seps = [" ", " </s><s>"]
+         role_map = {"user": "[INST]", "assistant": "[/INST]"}
+
+         system_prompt = f"[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n"
+         ret = ""
+         for i, msg in enumerate(messages[1:]):
+             role = role_map[msg["role"]]
+             content = msg["content"]
+             if content:
+                 if not isinstance(content, str):
+                     raise ValueError(
+                         "Currently multimodal context is not "
+                         "supported by the token counter."
+                     )
+                 if i == 0:
+                     ret += system_prompt + content
+                 else:
+                     ret += role + " " + content + seps[i % 2]
+             else:
+                 ret += role
+         return ret
+     elif model in [ModelType.VICUNA, ModelType.VICUNA_16K]:
+         seps = [" ", "</s>"]
+         role_map = {"user": "USER", "assistant": "ASSISTANT"}
+
+         system_prompt = f"{system_message}"
+         ret = system_prompt + seps[0]
+         for i, msg in enumerate(messages[1:]):
+             role = role_map[msg["role"]]
+             content = msg["content"]
+             if not isinstance(content, str):
+                 raise ValueError(
+                     "Currently multimodal context is not "
+                     "supported by the token counter."
+                 )
+             if content:
+                 ret += role + ": " + content + seps[i % 2]
+             else:
+                 ret += role + ":"
+         return ret
+     elif model == ModelType.GLM_4_OPEN_SOURCE:
+         system_prompt = f"[gMASK]<sop><|system|>\n{system_message}"
+         ret = system_prompt
+         for msg in messages[1:]:
+             role = msg["role"]
+             content = msg["content"]
+             if not isinstance(content, str):
+                 raise ValueError(
+                     "Currently multimodal context is not "
+                     "supported by the token counter."
+                 )
+             if content:
+                 ret += "<|" + role + "|>" + "\n" + content
+             else:
+                 ret += "<|" + role + "|>" + "\n"
+         return ret
+     elif model == ModelType.QWEN_2:
+         system_prompt = f"<|im_start|>system\n{system_message}<|im_end|>"
+         ret = system_prompt + "\n"
+         for msg in messages[1:]:
+             role = msg["role"]
+             content = msg["content"]
+             if not isinstance(content, str):
+                 raise ValueError(
+                     "Currently multimodal context is not "
+                     "supported by the token counter."
+                 )
+             if content:
+                 ret += (
+                     '<|im_start|>'
+                     + role
+                     + '\n'
+                     + content
+                     + '<|im_end|>'
+                     + '\n'
+                 )
+             else:
+                 ret += '<|im_start|>' + role + '\n'
+         return ret
+     elif model == ModelType.GROQ_MIXTRAL_8_7B:
+         # Mistral/Mixtral format
+         system_prompt = f"<s>[INST] {system_message} [/INST]\n"
+         ret = system_prompt
+
+         for msg in messages[1:]:
+             if msg["role"] == "user":
+                 ret += f"[INST] {msg['content']} [/INST]\n"
+             elif msg["role"] == "assistant":
+                 ret += f"{msg['content']}</s>\n"
+
+             if not isinstance(msg['content'], str):
+                 raise ValueError(
+                     "Currently multimodal context is not "
+                     "supported by the token counter."
+                 )
+
+         return ret.strip()
+     elif model in [ModelType.GROQ_GEMMA_7B_IT, ModelType.GROQ_GEMMA_2_9B_IT]:
+         # Gemma format
+         ret = f"<bos>{system_message}\n"
+         for msg in messages:
+             if msg["role"] == "user":
+                 ret += f"Human: {msg['content']}\n"
+             elif msg["role"] == "assistant":
+                 ret += f"Assistant: {msg['content']}\n"
+
+             if not isinstance(msg['content'], str):
+                 raise ValueError(
+                     "Currently multimodal context is not supported by the token counter."
+                 )
+
+         ret += "<eos>"
+         return ret
+     else:
+         raise ValueError(f"Invalid model type: {model}")
+
+
  def get_model_encoding(value_for_tiktoken: str):
      r"""Get model encoding from tiktoken.

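For illustration, a minimal sketch (not taken from either package version) of how the `messages_to_prompt` helper added above might be called, assuming it is importable from `camel.utils.token_counting` as the surrounding hunks suggest; the message contents are placeholders:

```python
# Illustrative sketch, not code from the diff: flatten an OpenAI-style message
# list into a single Llama-3 prompt string with the helper added above.
from camel.types import ModelType
from camel.utils.token_counting import messages_to_prompt

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarize CAMEL in one sentence."},
]

# The first message is treated as the system prompt; the remaining messages are
# joined with the model-specific role tags and separators.
prompt = messages_to_prompt(messages, ModelType.LLAMA_3)
print(prompt)
```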
@@ -87,15 +221,67 @@ class BaseTokenCounter(ABC):
          pass


+ class OpenSourceTokenCounter(BaseTokenCounter):
+     def __init__(self, model_type: ModelType, model_path: str):
+         r"""Constructor for the token counter for open-source models.
+
+         Args:
+             model_type (ModelType): Model type for which tokens will be
+                 counted.
+             model_path (str): The path to the model files, where the tokenizer
+                 model should be located.
+         """
+
+         # Use a fast Rust-based tokenizer if it is supported for a given model.
+         # If a fast tokenizer is not available for a given model,
+         # a normal Python-based tokenizer is returned instead.
+         from transformers import AutoTokenizer
+
+         try:
+             tokenizer = AutoTokenizer.from_pretrained(
+                 model_path,
+                 use_fast=True,
+             )
+         except TypeError:
+             tokenizer = AutoTokenizer.from_pretrained(
+                 model_path,
+                 use_fast=False,
+             )
+         except Exception:
+             raise ValueError(
+                 f"Invalid `model_path` ({model_path}) is provided. "
+                 "Tokenizer loading failed."
+             )
+
+         self.tokenizer = tokenizer
+         self.model_type = model_type
+
+     def count_tokens_from_messages(self, messages: List[OpenAIMessage]) -> int:
+         r"""Count number of tokens in the provided message list using
+         loaded tokenizer specific for this type of model.
+
+         Args:
+             messages (List[OpenAIMessage]): Message list with the chat history
+                 in OpenAI API format.
+
+         Returns:
+             int: Number of tokens in the messages.
+         """
+         prompt = messages_to_prompt(messages, self.model_type)
+         input_ids = self.tokenizer(prompt).input_ids
+
+         return len(input_ids)
+
+
  class OpenAITokenCounter(BaseTokenCounter):
-     def __init__(self, model: UnifiedModelType):
+     def __init__(self, model: ModelType):
          r"""Constructor for the token counter for OpenAI models.

          Args:
-             model (UnifiedModelType): Model type for which tokens will be
-                 counted.
+             model (ModelType): Model type for which tokens will be counted.
          """
          self.model: str = model.value_for_tiktoken
+         self.model_type = model

          self.tokens_per_message: int
          self.tokens_per_name: int
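A short usage sketch for the `OpenSourceTokenCounter` added above: it flattens the chat history with `messages_to_prompt` and tokenizes the result with a Hugging Face tokenizer, as the constructor shows. The checkpoint identifier below is a hypothetical placeholder; any local path or repo id that `transformers.AutoTokenizer` can load would do:

```python
# Illustrative sketch, not code from the diff.
from camel.types import ModelType
from camel.utils.token_counting import OpenSourceTokenCounter

counter = OpenSourceTokenCounter(
    model_type=ModelType.LLAMA_3,
    model_path="meta-llama/Meta-Llama-3-8B-Instruct",  # hypothetical checkpoint
)

num_tokens = counter.count_tokens_from_messages(
    [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello!"},
    ]
)
print(num_tokens)
```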
@@ -218,11 +404,15 @@ class OpenAITokenCounter(BaseTokenCounter):


  class AnthropicTokenCounter(BaseTokenCounter):
-     @dependencies_required('anthropic')
-     def __init__(self):
-         r"""Constructor for the token counter for Anthropic models."""
-         from anthropic import Anthropic
+     def __init__(self, model_type: ModelType):
+         r"""Constructor for the token counter for Anthropic models.

+         Args:
+             model_type (ModelType): Model type for which tokens will be
+                 counted.
+         """
+
+         self.model_type = model_type
          self.client = Anthropic()
          self.tokenizer = self.client.get_tokenizer()

@@ -245,16 +435,12 @@ class AnthropicTokenCounter(BaseTokenCounter):


  class GeminiTokenCounter(BaseTokenCounter):
-     def __init__(self, model_type: UnifiedModelType):
-         r"""Constructor for the token counter for Gemini models.
-
-         Args:
-             model_type (UnifiedModelType): Model type for which tokens will be
-                 counted.
-         """
+     def __init__(self, model_type: ModelType):
+         r"""Constructor for the token counter for Gemini models."""
          import google.generativeai as genai

-         self._client = genai.GenerativeModel(model_type)
+         self.model_type = model_type
+         self._client = genai.GenerativeModel(self.model_type.value)

      def count_tokens_from_messages(self, messages: List[OpenAIMessage]) -> int:
          r"""Count number of tokens in the provided message list using
@@ -282,13 +468,12 @@ class GeminiTokenCounter(BaseTokenCounter):
          return self._client.count_tokens(converted_messages).total_tokens


- class LiteLLMTokenCounter(BaseTokenCounter):
-     def __init__(self, model_type: UnifiedModelType):
+ class LiteLLMTokenCounter:
+     def __init__(self, model_type: str):
          r"""Constructor for the token counter for LiteLLM models.

          Args:
-             model_type (UnifiedModelType): Model type for which tokens will be
-                 counted.
+             model_type (str): Model type for which tokens will be counted.
          """
          self.model_type = model_type
          self._token_counter = None
@@ -353,10 +538,7 @@ class MistralTokenCounter(BaseTokenCounter):
          model_name = (
              "codestral-22b"
              if self.model_type
-             in {
-                 ModelType.MISTRAL_CODESTRAL,
-                 ModelType.MISTRAL_CODESTRAL_MAMBA,
-             }
+             in {ModelType.MISTRAL_CODESTRAL, ModelType.MISTRAL_CODESTRAL_MAMBA}
              else self.model_type.value
          )

camel/workforce/role_playing_worker.py
@@ -172,7 +172,7 @@ class RolePlayingWorker(Worker):
              role_name="User",
              content=prompt,
          )
-         response = self.summarize_agent.step(req, response_format=TaskResult)
+         response = self.summarize_agent.step(req, output_schema=TaskResult)
          result_dict = ast.literal_eval(response.msg.content)
          task_result = TaskResult(**result_dict)
          task.result = task_result.content

camel/workforce/single_agent_worker.py
@@ -77,7 +77,7 @@ class SingleAgentWorker(Worker):
              content=prompt,
          )
          try:
-             response = self.worker.step(req, response_format=TaskResult)
+             response = self.worker.step(req, output_schema=TaskResult)
          except Exception as e:
              print(
                  f"{Fore.RED}Error occurred while processing task {task.id}:"
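The two hunks above are the same one-keyword change: the structured-output argument of `ChatAgent.step()` is `output_schema` on the 0.2.3a0 side and `response_format` on the 0.2.3 side. A hedged sketch of a caller; the schema and agent setup below are assumptions for illustration, not code from the diff:

```python
# Illustrative sketch: passing a Pydantic schema to ChatAgent.step() under the
# keyword used by each side of this diff.
from pydantic import BaseModel

from camel.agents import ChatAgent
from camel.messages import BaseMessage


class TaskResult(BaseModel):
    content: str


agent = ChatAgent(
    BaseMessage.make_assistant_message(
        role_name="Worker", content="You summarize task outcomes."
    )
)
req = BaseMessage.make_user_message(role_name="User", content="Summarize the task.")

# 0.2.3a0 (the "+" side above) expects `output_schema=...`;
# 0.2.3 (the "-" side) expects `response_format=...`.
response = agent.step(req, output_schema=TaskResult)
print(response.msg.content)
```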
camel/workforce/task_channel.py
@@ -21,13 +21,12 @@ from camel.tasks import Task
  class PacketStatus(Enum):
      r"""The status of a packet. The packet can be in one of the following
      states:
-
      - ``SENT``: The packet has been sent to a worker.
      - ``RETURNED``: The packet has been returned by the worker, meaning that
-         the status of the task inside has been updated.
+       the status of the task inside has been updated.
      - ``ARCHIVED``: The packet has been archived, meaning that the content of
-         the task inside will not be changed. The task is considered
-         as a dependency.
+       the task inside will not be changed. The task is considered
+       as a dependency.
      """

      SENT = "SENT"
camel/workforce/workforce.py
@@ -287,7 +287,7 @@ class Workforce(BaseNode):
          )

          response = self.coordinator_agent.step(
-             req, response_format=TaskAssignResult
+             req, output_schema=TaskAssignResult
          )
          result_dict = ast.literal_eval(response.msg.content)
          task_assign_result = TaskAssignResult(**result_dict)
@@ -319,7 +319,7 @@
              role_name="User",
              content=prompt,
          )
-         response = self.coordinator_agent.step(req, response_format=WorkerConf)
+         response = self.coordinator_agent.step(req, output_schema=WorkerConf)
          result_dict = ast.literal_eval(response.msg.content)
          new_node_conf = WorkerConf(**result_dict)

@@ -364,8 +364,8 @@
          ).as_dict()

          model = ModelFactory.create(
-             model_platform=ModelPlatformType.DEFAULT,
-             model_type=ModelType.DEFAULT,
+             model_platform=ModelPlatformType.OPENAI,
+             model_type=ModelType.GPT_4O,
              model_config_dict=model_config_dict,
          )

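The last hunk above replaces the `DEFAULT` placeholders with an explicit platform and model. A minimal sketch of the equivalent factory call; `ChatGPTConfig` is an assumption here (the diff only shows `.as_dict()` being called on an existing config object):

```python
# Illustrative sketch, not code from the diff: the explicit ModelFactory call
# that the hunk above switches to.
from camel.configs import ChatGPTConfig
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

model = ModelFactory.create(
    model_platform=ModelPlatformType.OPENAI,
    model_type=ModelType.GPT_4O,
    model_config_dict=ChatGPTConfig(temperature=0.0).as_dict(),  # hypothetical config
)
```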
{camel_ai-0.2.3.dist-info → camel_ai-0.2.3a0.dist-info}/METADATA
@@ -1,17 +1,16 @@
  Metadata-Version: 2.1
  Name: camel-ai
- Version: 0.2.3
+ Version: 0.2.3a0
  Summary: Communicative Agents for AI Society Study
  Home-page: https://www.camel-ai.org/
  License: Apache-2.0
  Keywords: communicative-ai,ai-societies,artificial-intelligence,deep-learning,multi-agent-systems,cooperative-ai,natural-language-processing,large-language-models
  Author: CAMEL-AI.org
- Requires-Python: >=3.10,<3.13
+ Requires-Python: >=3.10.0,<3.12
  Classifier: License :: OSI Approved :: Apache Software License
  Classifier: Programming Language :: Python :: 3
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
- Classifier: Programming Language :: Python :: 3.12
  Provides-Extra: all
  Provides-Extra: encoders
  Provides-Extra: graph-storages
@@ -28,10 +27,7 @@ Provides-Extra: vector-databases
  Requires-Dist: PyMuPDF (>=1.22.5,<2.0.0) ; extra == "tools" or extra == "all"
  Requires-Dist: accelerate (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
  Requires-Dist: agentops (>=0.3.6,<0.4.0) ; extra == "tools" or extra == "all"
- Requires-Dist: anthropic (>=0.29.0,<0.30.0) ; extra == "model-platforms" or extra == "all"
- Requires-Dist: arxiv (>=2.1.3,<3.0.0) ; extra == "tools" or extra == "all"
- Requires-Dist: arxiv2text (>=0.1.14,<0.2.0) ; extra == "tools" or extra == "all"
- Requires-Dist: asknews (>=0.7.43,<0.8.0) ; extra == "tools" or extra == "all"
+ Requires-Dist: anthropic (>=0.29.0,<0.30.0)
  Requires-Dist: azure-storage-blob (>=12.21.0,<13.0.0) ; extra == "object-storages" or extra == "all"
  Requires-Dist: beautifulsoup4 (>=4,<5) ; extra == "tools" or extra == "all"
  Requires-Dist: botocore (>=1.35.3,<2.0.0) ; extra == "object-storages" or extra == "all"
@@ -50,12 +46,13 @@ Requires-Dist: firecrawl-py (>=1.0.0,<2.0.0) ; extra == "tools" or extra == "all
  Requires-Dist: google-cloud-storage (>=2.18.0,<3.0.0) ; extra == "object-storages" or extra == "all"
  Requires-Dist: google-generativeai (>=0.6.0,<0.7.0) ; extra == "model-platforms" or extra == "all"
  Requires-Dist: googlemaps (>=4.10.0,<5.0.0) ; extra == "tools" or extra == "all"
+ Requires-Dist: groq (>=0.5.0,<0.6.0)
  Requires-Dist: imageio[pyav] (>=2.34.2,<3.0.0) ; extra == "tools" or extra == "all"
- Requires-Dist: ipykernel (>=6.0.0,<7.0.0) ; extra == "tools" or extra == "all"
+ Requires-Dist: ipykernel (>=6.0.0,<7.0.0)
  Requires-Dist: jsonschema (>=4,<5)
  Requires-Dist: jupyter_client (>=8.6.2,<9.0.0) ; extra == "tools" or extra == "all"
  Requires-Dist: litellm (>=1.38.1,<2.0.0) ; extra == "model-platforms" or extra == "all"
- Requires-Dist: mistralai (>=1.1.0,<2.0.0) ; extra == "model-platforms" or extra == "all"
+ Requires-Dist: mistralai (>=1.0.0,<2.0.0) ; extra == "model-platforms" or extra == "all"
  Requires-Dist: mock (>=5,<6) ; extra == "test"
  Requires-Dist: nebula3-python (==3.8.2) ; extra == "rag" or extra == "graph-storages" or extra == "all"
  Requires-Dist: neo4j (>=5.18.0,<6.0.0) ; extra == "rag" or extra == "graph-storages" or extra == "all"
@@ -67,7 +64,6 @@ Requires-Dist: openapi-spec-validator (>=0.7.1,<0.8.0) ; extra == "tools" or ext
  Requires-Dist: opencv-python (>=4,<5) ; extra == "huggingface-agent" or extra == "all"
  Requires-Dist: pandoc
  Requires-Dist: pathlib (>=1.0.1,<2.0.0)
- Requires-Dist: pdfplumber (>=0.11.0,<0.12.0) ; extra == "tools" or extra == "all"
  Requires-Dist: pillow (>=10.2.0,<11.0.0) ; extra == "tools" or extra == "all"
  Requires-Dist: prance (>=23.6.21.0,<24.0.0.0) ; extra == "tools" or extra == "all"
  Requires-Dist: praw (>=7.7.1,<8.0.0) ; extra == "tools" or extra == "all"
@@ -85,18 +81,15 @@ Requires-Dist: rank-bm25 (>=0.2.2,<0.3.0) ; extra == "rag" or extra == "retrieve
  Requires-Dist: redis (>=5.0.6,<6.0.0) ; extra == "kv-stroages" or extra == "all"
  Requires-Dist: reka-api (>=3.0.8,<4.0.0) ; extra == "model-platforms" or extra == "all"
  Requires-Dist: requests_oauthlib (>=1.3.1,<2.0.0) ; extra == "tools" or extra == "all"
- Requires-Dist: scholarly[tor] (==1.7.11) ; extra == "tools" or extra == "all"
  Requires-Dist: sentence-transformers (>=3.0.1,<4.0.0) ; extra == "rag" or extra == "encoders" or extra == "all"
  Requires-Dist: sentencepiece (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
- Requires-Dist: slack-bolt (>=1.20.1,<2.0.0) ; extra == "tools" or extra == "all"
  Requires-Dist: slack-sdk (>=3.27.2,<4.0.0) ; extra == "tools" or extra == "all"
  Requires-Dist: soundfile (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
  Requires-Dist: textblob (>=0.18.0.post0,<0.19.0) ; extra == "tools" or extra == "all"
  Requires-Dist: tiktoken (>=0.7.0,<0.8.0)
- Requires-Dist: torch (==2.1.0) ; (platform_system == "Darwin" and platform_machine != "arm64") and (extra == "huggingface-agent" or extra == "all")
- Requires-Dist: torch (>=2,<3) ; (platform_system != "Darwin" or platform_machine == "arm64") and (extra == "huggingface-agent" or extra == "all")
+ Requires-Dist: torch (>=2,<3) ; extra == "huggingface-agent" or extra == "all"
  Requires-Dist: transformers (>=4,<5) ; extra == "huggingface-agent" or extra == "all"
- Requires-Dist: unstructured[all-docs] (>=0.14,<0.15) ; extra == "rag" or extra == "tools" or extra == "all"
+ Requires-Dist: unstructured[all-docs] (>=0.10,<0.11) ; extra == "rag" or extra == "tools" or extra == "all"
  Requires-Dist: wikipedia (>=1,<2) ; extra == "search-tools" or extra == "tools" or extra == "all"
  Requires-Dist: wolframalpha (>=5.0.0,<6.0.0) ; extra == "search-tools" or extra == "tools" or extra == "all"
  Project-URL: Documentation, https://docs.camel-ai.org
@@ -112,13 +105,14 @@ Description-Content-Type: text/markdown

  ______________________________________________________________________

- # CAMEL: Finding the Scaling Laws of Agents
+ # CAMEL: Communicative Agents for “Mind” Exploration of Large Language Model Society

  [![Python Version][python-image]][python-url]
  [![PyTest Status][pytest-image]][pytest-url]
  [![Documentation][docs-image]][docs-url]
  [![Star][star-image]][star-url]
  [![Package License][package-license-image]][package-license-url]
+ [![Data License][data-license-image]][data-license-url]

  <p align="center">
    <a href="https://github.com/camel-ai/camel#community">Community</a> |
@@ -132,14 +126,16 @@ ______________________________________________________________________
  </p>

  <p align="center">
-   <img src='https://raw.githubusercontent.com/camel-ai/camel/master/misc/logo_light.png' width=800>
+   <img src='https://raw.githubusercontent.com/camel-ai/camel/master/misc/primary_logo.png' width=800>
  </p>

+ ## Overview
+ The rapid advancement of conversational and chat-based language models has led to remarkable progress in complex task-solving. However, their success heavily relies on human input to guide the conversation, which can be challenging and time-consuming. This paper explores the potential of building scalable techniques to facilitate autonomous cooperation among communicative agents and provide insight into their "cognitive" processes. To address the challenges of achieving autonomous cooperation, we propose a novel communicative agent framework named *role-playing*. Our approach involves using *inception prompting* to guide chat agents toward task completion while maintaining consistency with human intentions. We showcase how role-playing can be used to generate conversational data for studying the behaviors and capabilities of chat agents, providing a valuable resource for investigating conversational language models. Our contributions include introducing a novel communicative agent framework, offering a scalable approach for studying the cooperative behaviors and capabilities of multi-agent systems, and open-sourcing our library to support research on communicative agents and beyond. The GitHub repository of this project is made publicly available on: [https://github.com/camel-ai/camel](https://github.com/camel-ai/camel).

  ## Community
- 🐫 CAMEL is an open-source community dedicated to finding the scaling laws of agents. We believe that studying these agents on a large scale offers valuable insights into their behaviors, capabilities, and potential risks. To facilitate research in this field, we implement and support various types of agents, tasks, prompts, models, and simulated environments.
+ 🐫 CAMEL is an open-source library designed for the study of autonomous and communicative agents. We believe that studying these agents on a large scale offers valuable insights into their behaviors, capabilities, and potential risks. To facilitate research in this field, we implement and support various types of agents, tasks, prompts, models, and simulated environments.

- Join us ([*Discord*](https://discord.camel-ai.org/), [*WeChat*](https://ghli.org/camel/wechat.png) or [*Slack*](https://join.slack.com/t/camel-ai/shared_invite/zt-2g7xc41gy-_7rcrNNAArIP6sLQqldkqQ)) in pushing the boundaries of finding the scaling laws of agents.
+ Join us ([*Slack*](https://join.slack.com/t/camel-ai/shared_invite/zt-2g7xc41gy-_7rcrNNAArIP6sLQqldkqQ), [*Discord*](https://discord.gg/CNcNpquyDc) or [*WeChat*](https://ghli.org/camel/wechat.png)) in pushing the boundaries of building AI Society.

  ## Try it yourself
  We provide a [![Google Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1AzP33O8rnMW__7ocWJhVBXjKziJXPtim?usp=sharing) demo showcasing a conversation between two ChatGPT agents playing roles as a python programmer and a stock trader collaborating on developing a trading bot for stock market.
@@ -175,7 +171,7 @@ Some features require extra dependencies:
  Install `CAMEL` from source with poetry (Recommended):
  ```sh
  # Make sure your python version is later than 3.10
- # You can use pyenv to manage multiple python versions in your system
+ # You can use pyenv to manage multiple python verisons in your sytstem

  # Clone github repo
  git clone https://github.com/camel-ai/camel.git
@@ -183,7 +179,7 @@ git clone https://github.com/camel-ai/camel.git
  # Change directory into project directory
  cd camel

- # If you didn't install poetry before
+ # If you didn't install peotry before
  pip install poetry # (Optional)

  # We suggest using python 3.10
@@ -221,7 +217,7 @@ conda create --name camel python=3.10
  conda activate camel

  # Clone github repo
- git clone -b v0.2.3 https://github.com/camel-ai/camel.git
+ git clone -b v0.2.2 https://github.com/camel-ai/camel.git

  # Change directory into project directory
  cd camel
@@ -292,7 +288,35 @@ Please note that the environment variable is session-specific. If you open a new
  ```bash
  ollama pull llama3
  ```
- - Run the script. Enjoy your Llama3 model, enhanced by CAMEL's excellent agents.
+ - Create a ModelFile similar the one below in your project directory.
+ ```bash
+ FROM llama3
+
+ # Set parameters
+ PARAMETER temperature 0.8
+ PARAMETER stop Result
+
+ # Sets a custom system message to specify the behavior of the chat assistant
+
+ # Leaving it blank for now.
+
+ SYSTEM """ """
+ ```
+ - Create a script to get the base model (llama3) and create a custom model using the ModelFile above. Save this as a .sh file:
+ ```bash
+ #!/bin/zsh
+
+ # variables
+ model_name="llama3"
+ custom_model_name="camel-llama3"
+
+ #get the base model
+ ollama pull $model_name
+
+ #create the model file
+ ollama create $custom_model_name -f ./Llama3ModelFile
+ ```
+ - Navigate to the directory where the script and ModelFile are located and run the script. Enjoy your Llama3 model, enhanced by CAMEL's excellent agents.
  ```python
  from camel.agents import ChatAgent
  from camel.messages import BaseMessage
@@ -302,6 +326,7 @@ Please note that the environment variable is session-specific. If you open a new
  ollama_model = ModelFactory.create(
      model_platform=ModelPlatformType.OLLAMA,
      model_type="llama3",
+     url="http://localhost:11434/v1",
      model_config_dict={"temperature": 0.4},
  )

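The hunk above adds an explicit `url` for the local Ollama endpoint to the README snippet. A hedged continuation of that snippet, showing the model being handed to a `ChatAgent`; the system prompt and user message are placeholders, not text from either README:

```python
# Illustrative sketch: drive the locally served Ollama model (with the endpoint
# URL added above) through a ChatAgent.
from camel.agents import ChatAgent
from camel.messages import BaseMessage
from camel.models import ModelFactory
from camel.types import ModelPlatformType

ollama_model = ModelFactory.create(
    model_platform=ModelPlatformType.OLLAMA,
    model_type="llama3",
    url="http://localhost:11434/v1",
    model_config_dict={"temperature": 0.4},
)

agent = ChatAgent(
    system_message=BaseMessage.make_assistant_message(
        role_name="Assistant", content="You are a helpful assistant."
    ),
    model=ollama_model,
)

response = agent.step(
    BaseMessage.make_user_message(role_name="User", content="Say hi to CAMEL.")
)
print(response.msg.content)
```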
@@ -388,7 +413,7 @@ We implemented amazing research ideas from other works for you to build, compare
  year={2023}
  }
  ```
- ## Acknowledgment
+ ## Acknowledgement
  Special thanks to [Nomic AI](https://home.nomic.ai/) for giving us extended access to their data set exploration tool (Atlas).

  We would also like to thank Haya Hammoud for designing the initial logo of our project.
@@ -397,14 +422,16 @@ We would also like to thank Haya Hammoud for designing the initial logo of our p

  The source code is licensed under Apache 2.0.

+ The datasets are licensed under CC BY NC 4.0, which permits only non-commercial usage. It is advised that any models trained using the dataset should not be utilized for anything other than research purposes.
+
  ## Contributing to CAMEL 🐫
  We appreciate your interest in contributing to our open-source initiative. We provide a document of [contributing guidelines](https://github.com/camel-ai/camel/blob/master/CONTRIBUTING.md) which outlines the steps for contributing to CAMEL. Please refer to this guide to ensure smooth collaboration and successful contributions. 🤝🚀

  ## Contact
  For more information please contact camel.ai.team@gmail.com.

- [python-image]: https://img.shields.io/badge/Python-3.10%2C%203.11%2C%203.12-brightgreen.svg
- [python-url]: https://www.python.org/
+ [python-image]: https://img.shields.io/badge/Python-3.10%2B-brightgreen.svg
+ [python-url]: https://docs.python.org/3.10/
  [pytest-image]: https://github.com/camel-ai/camel/actions/workflows/pytest_package.yml/badge.svg
  [pytest-url]: https://github.com/camel-ai/camel/actions/workflows/pytest_package.yml
  [docs-image]: https://img.shields.io/badge/Documentation-grey.svg?logo=github
@@ -413,6 +440,8 @@ For more information please contact camel.ai.team@gmail.com.
  [star-url]: https://github.com/camel-ai/camel/stargazers
  [package-license-image]: https://img.shields.io/badge/License-Apache_2.0-blue.svg
  [package-license-url]: https://github.com/camel-ai/camel/blob/master/licenses/LICENSE
+ [data-license-image]: https://img.shields.io/badge/License-CC_BY--NC_4.0-lightgrey.svg
+ [data-license-url]: https://github.com/camel-ai/camel/blob/master/licenses/DATA_LICENSE

  [colab-url]: https://colab.research.google.com/drive/1AzP33O8rnMW__7ocWJhVBXjKziJXPtim?usp=sharing
  [colab-image]: https://colab.research.google.com/assets/colab-badge.svg
@@ -420,7 +449,7 @@ For more information please contact camel.ai.team@gmail.com.
  [huggingface-image]: https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-CAMEL--AI-ffc107?color=ffc107&logoColor=white
  [slack-url]: https://join.slack.com/t/camel-ai/shared_invite/zt-2g7xc41gy-_7rcrNNAArIP6sLQqldkqQ
  [slack-image]: https://img.shields.io/badge/Slack-CAMEL--AI-blueviolet?logo=slack
- [discord-url]: https://discord.camel-ai.org/
+ [discord-url]: https://discord.gg/CNcNpquyDc
  [discord-image]: https://img.shields.io/badge/Discord-CAMEL--AI-7289da?logo=discord&logoColor=white&color=7289da
  [wechat-url]: https://ghli.org/camel/wechat.png
  [wechat-image]: https://img.shields.io/badge/WeChat-CamelAIOrg-brightgreen?logo=wechat&logoColor=white