camel-ai 0.1.1__py3-none-any.whl → 0.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic. Click here for more details.

Files changed (99) hide show
  1. camel/__init__.py +1 -11
  2. camel/agents/__init__.py +5 -5
  3. camel/agents/chat_agent.py +124 -63
  4. camel/agents/critic_agent.py +28 -17
  5. camel/agents/deductive_reasoner_agent.py +235 -0
  6. camel/agents/embodied_agent.py +92 -40
  7. camel/agents/role_assignment_agent.py +27 -17
  8. camel/agents/task_agent.py +60 -34
  9. camel/agents/tool_agents/base.py +0 -1
  10. camel/agents/tool_agents/hugging_face_tool_agent.py +7 -4
  11. camel/configs.py +119 -7
  12. camel/embeddings/__init__.py +2 -0
  13. camel/embeddings/base.py +3 -2
  14. camel/embeddings/openai_embedding.py +3 -3
  15. camel/embeddings/sentence_transformers_embeddings.py +65 -0
  16. camel/functions/__init__.py +13 -3
  17. camel/functions/google_maps_function.py +335 -0
  18. camel/functions/math_functions.py +7 -7
  19. camel/functions/openai_function.py +344 -42
  20. camel/functions/search_functions.py +100 -35
  21. camel/functions/twitter_function.py +484 -0
  22. camel/functions/weather_functions.py +36 -23
  23. camel/generators.py +65 -46
  24. camel/human.py +17 -11
  25. camel/interpreters/__init__.py +25 -0
  26. camel/interpreters/base.py +49 -0
  27. camel/{utils/python_interpreter.py → interpreters/internal_python_interpreter.py} +129 -48
  28. camel/interpreters/interpreter_error.py +19 -0
  29. camel/interpreters/subprocess_interpreter.py +190 -0
  30. camel/loaders/__init__.py +22 -0
  31. camel/{functions/base_io_functions.py → loaders/base_io.py} +38 -35
  32. camel/{functions/unstructured_io_fuctions.py → loaders/unstructured_io.py} +199 -110
  33. camel/memories/__init__.py +17 -7
  34. camel/memories/agent_memories.py +156 -0
  35. camel/memories/base.py +97 -32
  36. camel/memories/blocks/__init__.py +21 -0
  37. camel/memories/{chat_history_memory.py → blocks/chat_history_block.py} +34 -34
  38. camel/memories/blocks/vectordb_block.py +101 -0
  39. camel/memories/context_creators/__init__.py +3 -2
  40. camel/memories/context_creators/score_based.py +32 -20
  41. camel/memories/records.py +6 -5
  42. camel/messages/__init__.py +2 -2
  43. camel/messages/base.py +99 -16
  44. camel/messages/func_message.py +7 -4
  45. camel/models/__init__.py +4 -2
  46. camel/models/anthropic_model.py +132 -0
  47. camel/models/base_model.py +3 -2
  48. camel/models/model_factory.py +10 -8
  49. camel/models/open_source_model.py +25 -13
  50. camel/models/openai_model.py +9 -10
  51. camel/models/stub_model.py +6 -5
  52. camel/prompts/__init__.py +7 -5
  53. camel/prompts/ai_society.py +21 -14
  54. camel/prompts/base.py +54 -47
  55. camel/prompts/code.py +22 -14
  56. camel/prompts/evaluation.py +8 -5
  57. camel/prompts/misalignment.py +26 -19
  58. camel/prompts/object_recognition.py +35 -0
  59. camel/prompts/prompt_templates.py +14 -8
  60. camel/prompts/role_description_prompt_template.py +16 -10
  61. camel/prompts/solution_extraction.py +9 -5
  62. camel/prompts/task_prompt_template.py +24 -21
  63. camel/prompts/translation.py +9 -5
  64. camel/responses/agent_responses.py +5 -2
  65. camel/retrievers/__init__.py +24 -0
  66. camel/retrievers/auto_retriever.py +319 -0
  67. camel/retrievers/base.py +64 -0
  68. camel/retrievers/bm25_retriever.py +149 -0
  69. camel/retrievers/vector_retriever.py +166 -0
  70. camel/societies/__init__.py +1 -1
  71. camel/societies/babyagi_playing.py +56 -32
  72. camel/societies/role_playing.py +188 -133
  73. camel/storages/__init__.py +18 -0
  74. camel/storages/graph_storages/__init__.py +23 -0
  75. camel/storages/graph_storages/base.py +82 -0
  76. camel/storages/graph_storages/graph_element.py +74 -0
  77. camel/storages/graph_storages/neo4j_graph.py +582 -0
  78. camel/storages/key_value_storages/base.py +1 -2
  79. camel/storages/key_value_storages/in_memory.py +1 -2
  80. camel/storages/key_value_storages/json.py +8 -13
  81. camel/storages/vectordb_storages/__init__.py +33 -0
  82. camel/storages/vectordb_storages/base.py +202 -0
  83. camel/storages/vectordb_storages/milvus.py +396 -0
  84. camel/storages/vectordb_storages/qdrant.py +371 -0
  85. camel/terminators/__init__.py +1 -1
  86. camel/terminators/base.py +2 -3
  87. camel/terminators/response_terminator.py +21 -12
  88. camel/terminators/token_limit_terminator.py +5 -3
  89. camel/types/__init__.py +12 -6
  90. camel/types/enums.py +86 -13
  91. camel/types/openai_types.py +10 -5
  92. camel/utils/__init__.py +18 -13
  93. camel/utils/commons.py +242 -81
  94. camel/utils/token_counting.py +135 -15
  95. {camel_ai-0.1.1.dist-info → camel_ai-0.1.3.dist-info}/METADATA +116 -74
  96. camel_ai-0.1.3.dist-info/RECORD +101 -0
  97. {camel_ai-0.1.1.dist-info → camel_ai-0.1.3.dist-info}/WHEEL +1 -1
  98. camel/memories/context_creators/base.py +0 -72
  99. camel_ai-0.1.1.dist-info/RECORD +0 -75
@@ -0,0 +1,166 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ from typing import Any, Dict, List, Optional
15
+
16
+ from camel.embeddings import BaseEmbedding, OpenAIEmbedding
17
+ from camel.functions import UnstructuredIO
18
+ from camel.retrievers.base import BaseRetriever
19
+ from camel.storages import BaseVectorStorage, VectorDBQuery, VectorRecord
20
+
21
+ DEFAULT_TOP_K_RESULTS = 1
22
+ DEFAULT_SIMILARITY_THRESHOLD = 0.75
23
+
24
+
25
class VectorRetriever(BaseRetriever):
    r"""An implementation of the `BaseRetriever` by using vector storage and
    embedding model.

    This class facilitates the retrieval of relevant information using a
    query-based approach, backed by vector embeddings.

    Attributes:
        embedding_model (BaseEmbedding): Embedding model used to generate
            vector embeddings.
    """

    def __init__(self, embedding_model: Optional[BaseEmbedding] = None) -> None:
        r"""Initializes the retriever class with an optional embedding model.

        Args:
            embedding_model (Optional[BaseEmbedding]): The embedding model
                instance. Defaults to `OpenAIEmbedding` if not provided.
        """
        self.embedding_model = embedding_model or OpenAIEmbedding()

    def process(  # type: ignore[override]
        self,
        content_input_path: str,
        storage: BaseVectorStorage,
        chunk_type: str = "chunk_by_title",
        embed_batch_size: int = 50,
        **kwargs: Any,
    ) -> None:
        r"""Processes content from a file or URL, divides it into chunks by
        using `Unstructured IO`, and stores their embeddings in the specified
        vector storage.

        Args:
            content_input_path (str): File path or URL of the content to be
                processed.
            storage (BaseVectorStorage): Vector storage the chunk embeddings
                are written to.
            chunk_type (str): Type of chunking going to apply. Defaults to
                "chunk_by_title".
            embed_batch_size (int): Number of chunks embedded and stored per
                batch. Defaults to 50.
            **kwargs (Any): Additional keyword arguments for elements chunking.
        """
        unstructured_modules = UnstructuredIO()
        elements = unstructured_modules.parse_file_or_url(content_input_path)
        chunks = unstructured_modules.chunk_elements(
            chunk_type=chunk_type, elements=elements, **kwargs
        )
        # Embed and store in batches to bound request size and memory use.
        for i in range(0, len(chunks), embed_batch_size):
            batch_chunks = chunks[i : i + embed_batch_size]
            batch_vectors = self.embedding_model.embed_list(
                objs=[str(chunk) for chunk in batch_chunks]
            )

            # Payload of each vector record carries the content path, chunk
            # metadata, and chunk text so query hits can be traced back to
            # their source.
            records = [
                VectorRecord(
                    vector=vector,
                    payload={
                        "content path": content_input_path,
                        "metadata": chunk.metadata.to_dict(),
                        "text": str(chunk),
                    },
                )
                for vector, chunk in zip(batch_vectors, batch_chunks)
            ]

            storage.add(records=records)

    def query(  # type: ignore[override]
        self,
        query: str,
        storage: BaseVectorStorage,
        top_k: int = DEFAULT_TOP_K_RESULTS,
        similarity_threshold: float = DEFAULT_SIMILARITY_THRESHOLD,
        **kwargs: Any,
    ) -> List[Dict[str, Any]]:
        r"""Executes a query in vector storage and compiles the retrieved
        results into a dictionary.

        Args:
            query (str): Query string for information retriever.
            storage (BaseVectorStorage): Vector storage to query.
            top_k (int, optional): The number of top results to return during
                retriever. Must be a positive integer. Defaults to 1.
            similarity_threshold (float, optional): The similarity threshold
                for filtering results. Defaults to 0.75.
            **kwargs (Any): Additional keyword arguments for vector storage
                query.

        Returns:
            List[Dict[str, Any]]: Concatenated list of the query results.

        Raises:
            ValueError: If 'top_k' is less than or equal to 0, if vector
                storage is empty, if payload of vector storage is None.
        """
        if top_k <= 0:
            raise ValueError("top_k must be a positive integer.")

        # Load the storage in case it's hosted remotely.
        storage.load()

        query_vector = self.embedding_model.embed(obj=query)
        db_query = VectorDBQuery(query_vector=query_vector, top_k=top_k)
        query_results = storage.query(query=db_query, **kwargs)

        # Fix: an empty storage yields no results; raise the documented
        # ValueError instead of an IndexError on query_results[0].
        if not query_results:
            raise ValueError(
                "Vector storage is empty, please check the collection."
            )

        if query_results[0].record.payload is None:
            raise ValueError(
                "Payload of vector storage is None, please check"
                " the collection."
            )

        # Keep only results above the similarity threshold that carry a
        # payload, formatted for downstream consumption.
        formatted_results = []
        for result in query_results:
            if (
                result.similarity >= similarity_threshold
                and result.record.payload is not None
            ):
                result_dict = {
                    'similarity score': str(result.similarity),
                    'content path': result.record.payload.get(
                        'content path', ''
                    ),
                    'metadata': result.record.payload.get('metadata', {}),
                    'text': result.record.payload.get('text', ''),
                }
                formatted_results.append(result_dict)

        content_path = query_results[0].record.payload.get('content path', '')

        if not formatted_results:
            return [
                {
                    'text': f"No suitable information retrieved from "
                    f"{content_path} with similarity_threshold = "
                    f"{similarity_threshold}."
                }
            ]
        return formatted_results
@@ -11,8 +11,8 @@
11
11
  # See the License for the specific language governing permissions and
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
- from .role_playing import RolePlaying
15
14
  from .babyagi_playing import BabyAGI
15
+ from .role_playing import RolePlaying
16
16
 
17
17
  __all__ = [
18
18
  'RolePlaying',
@@ -80,13 +80,17 @@ class BabyAGI:
80
80
  self.task_type = task_type
81
81
  self.task_prompt = task_prompt
82
82
  self.specified_task_prompt: TextPrompt
83
- self.init_specified_task_prompt(assistant_role_name, user_role_name,
84
- task_specify_agent_kwargs,
85
- extend_task_specify_meta_dict,
86
- output_language)
83
+ self.init_specified_task_prompt(
84
+ assistant_role_name,
85
+ user_role_name,
86
+ task_specify_agent_kwargs,
87
+ extend_task_specify_meta_dict,
88
+ output_language,
89
+ )
87
90
 
88
91
  sys_msg_generator = SystemMessageGenerator(
89
- task_type=self.task_type, **(sys_msg_generator_kwargs or {}))
92
+ task_type=self.task_type, **(sys_msg_generator_kwargs or {})
93
+ )
90
94
 
91
95
  init_assistant_sys_msg = sys_msg_generator.from_dicts(
92
96
  meta_dicts=[
@@ -105,20 +109,27 @@ class BabyAGI:
105
109
  self.assistant_sys_msg: BaseMessage
106
110
  self.task_creation_agent: TaskCreationAgent
107
111
  self.task_prioritization_agent: TaskPrioritizationAgent
108
- self.init_agents(init_assistant_sys_msg[0], assistant_agent_kwargs,
109
- task_creation_agent_kwargs,
110
- task_prioritization_agent_kwargs, output_language,
111
- message_window_size)
112
+ self.init_agents(
113
+ init_assistant_sys_msg[0],
114
+ assistant_agent_kwargs,
115
+ task_creation_agent_kwargs,
116
+ task_prioritization_agent_kwargs,
117
+ output_language,
118
+ message_window_size,
119
+ )
112
120
 
113
121
  self.subtasks: deque = deque([])
114
122
  self.solved_subtasks: List[str] = []
115
123
  self.MAX_TASK_HISTORY = max_task_history
116
124
 
117
125
  def init_specified_task_prompt(
118
- self, assistant_role_name: str, user_role_name: str,
119
- task_specify_agent_kwargs: Optional[Dict],
120
- extend_task_specify_meta_dict: Optional[Dict],
121
- output_language: Optional[str]):
126
+ self,
127
+ assistant_role_name: str,
128
+ user_role_name: str,
129
+ task_specify_agent_kwargs: Optional[Dict],
130
+ extend_task_specify_meta_dict: Optional[Dict],
131
+ output_language: Optional[str],
132
+ ):
122
133
  r"""Use a task specify agent to generate a specified task prompt.
123
134
  Generated specified task prompt will be used to replace original
124
135
  task prompt. If there is no task specify agent, specified task
@@ -138,8 +149,10 @@ class BabyAGI:
138
149
  task_specify_meta_dict = dict()
139
150
  if self.task_type in [TaskType.AI_SOCIETY, TaskType.MISALIGNMENT]:
140
151
  task_specify_meta_dict.update(
141
- dict(assistant_role=assistant_role_name,
142
- user_role=user_role_name))
152
+ dict(
153
+ assistant_role=assistant_role_name, user_role=user_role_name
154
+ )
155
+ )
143
156
  task_specify_meta_dict.update(extend_task_specify_meta_dict or {})
144
157
  task_specify_agent = TaskSpecifyAgent(
145
158
  task_type=self.task_type,
@@ -151,12 +164,15 @@ class BabyAGI:
151
164
  meta_dict=task_specify_meta_dict,
152
165
  )
153
166
 
154
- def init_agents(self, init_assistant_sys_msg: BaseMessage,
155
- assistant_agent_kwargs: Optional[Dict],
156
- task_creation_agent_kwargs: Optional[Dict],
157
- task_prioritization_agent_kwargs: Optional[Dict],
158
- output_language: Optional[str],
159
- message_window_size: Optional[int] = None):
167
+ def init_agents(
168
+ self,
169
+ init_assistant_sys_msg: BaseMessage,
170
+ assistant_agent_kwargs: Optional[Dict],
171
+ task_creation_agent_kwargs: Optional[Dict],
172
+ task_prioritization_agent_kwargs: Optional[Dict],
173
+ output_language: Optional[str],
174
+ message_window_size: Optional[int] = None,
175
+ ):
160
176
  r"""Initialize assistant and user agents with their system messages.
161
177
 
162
178
  Args:
@@ -215,12 +231,14 @@ class BabyAGI:
215
231
  if not self.subtasks:
216
232
  new_subtask_list = self.task_creation_agent.run(task_list=[])
217
233
  prioritized_subtask_list = self.task_prioritization_agent.run(
218
- new_subtask_list)
234
+ new_subtask_list
235
+ )
219
236
  self.subtasks = deque(prioritized_subtask_list)
220
237
 
221
238
  task_name = self.subtasks.popleft()
222
239
  assistant_msg_msg = BaseMessage.make_user_message(
223
- role_name=self.assistant_sys_msg.role_name, content=f"{task_name}")
240
+ role_name=self.assistant_sys_msg.role_name, content=f"{task_name}"
241
+ )
224
242
 
225
243
  assistant_response = self.assistant_agent.step(assistant_msg_msg)
226
244
  assistant_msg = assistant_response.msgs[0]
@@ -232,12 +250,14 @@ class BabyAGI:
232
250
  past_tasks = self.solved_subtasks + list(self.subtasks)
233
251
 
234
252
  new_subtask_list = self.task_creation_agent.run(
235
- task_list=past_tasks[-self.MAX_TASK_HISTORY:])
253
+ task_list=past_tasks[-self.MAX_TASK_HISTORY :]
254
+ )
236
255
 
237
256
  if new_subtask_list:
238
257
  self.subtasks.extend(new_subtask_list)
239
258
  prioritized_subtask_list = self.task_prioritization_agent.run(
240
- task_list=list(self.subtasks)[-self.MAX_TASK_HISTORY:])
259
+ task_list=list(self.subtasks)[-self.MAX_TASK_HISTORY :]
260
+ )
241
261
  self.subtasks = deque(prioritized_subtask_list)
242
262
  else:
243
263
  print("no new tasks")
@@ -245,10 +265,14 @@ class BabyAGI:
245
265
  assistant_response.info['subtasks'] = list(self.subtasks)
246
266
  if not self.subtasks:
247
267
  terminated = True
248
- assistant_response.info[
249
- 'termination_reasons'] = "All tasks are solved"
250
- return ChatAgentResponse([assistant_msg], terminated,
251
- assistant_response.info)
252
- return ChatAgentResponse([assistant_msg],
253
- assistant_response.terminated,
254
- assistant_response.info)
268
+ assistant_response.info['termination_reasons'] = (
269
+ "All tasks are solved"
270
+ )
271
+ return ChatAgentResponse(
272
+ [assistant_msg], terminated, assistant_response.info
273
+ )
274
+ return ChatAgentResponse(
275
+ [assistant_msg],
276
+ assistant_response.terminated,
277
+ assistant_response.info,
278
+ )