camel-ai 0.2.13__py3-none-any.whl → 0.2.15a0__py3-none-any.whl

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.

Potentially problematic release.



@@ -0,0 +1,249 @@
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+ from typing import Any, Callable, List, Literal, Type, Union
+
+ from pydantic import BaseModel
+
+ from .base import BaseConverter
+
+
+ class OutlinesConverter(BaseConverter):
+     r"""OutlinesConverter converts unstructured content into structured
+     output (regex-matched strings, JSON, Pydantic models, primitive types,
+     choices, or grammar-constrained text) using the outlines library.
+
+     Args:
+         model_type (str): The model type to be used.
+         platform (str, optional): The platform to be used.
+             1. transformers
+             2. mamba
+             3. vllm
+             4. llamacpp
+             5. mlx
+             (default: "transformers")
+         **kwargs: The keyword arguments to be used. See the outlines
+             documentation for more details. See
+             https://dottxt-ai.github.io/outlines/latest/reference/models/models/
+     """
+
+     def __init__(
+         self,
+         model_type: str,
+         platform: Literal[
+             "vllm", "transformers", "mamba", "llamacpp", "mlx"
+         ] = "transformers",
+         **kwargs: Any,
+     ):
+         self.model_type = model_type
+         from outlines import models
+
+         match platform:
+             case "vllm":
+                 self._outlines_model = models.vllm(model_type, **kwargs)
+             case "transformers":
+                 self._outlines_model = models.transformers(
+                     model_type, **kwargs
+                 )
+             case "mamba":
+                 self._outlines_model = models.mamba(model_type, **kwargs)
+             case "llamacpp":
+                 self._outlines_model = models.llamacpp(model_type, **kwargs)
+             case "mlx":
+                 self._outlines_model = models.mlxlm(model_type, **kwargs)
+             case _:
+                 raise ValueError(f"Unsupported platform: {platform}")
+
+     def convert_regex(self, content: str, regex_pattern: str) -> str:
+         r"""Convert the content to match the specified regex pattern.
+
+         Args:
+             content (str): The content to be converted.
+             regex_pattern (str): The regex pattern to be used.
+
+         Returns:
+             str: The converted content.
+         """
+         import outlines
+
+         regex_generator = outlines.generate.regex(
+             self._outlines_model, regex_pattern
+         )
+         return regex_generator(content)
+
+     def convert_json(
+         self,
+         content: str,
+         output_schema: Union[str, Callable],
+     ) -> dict:
+         r"""Convert the content to the specified JSON schema given by
+         output_schema.
+
+         Args:
+             content (str): The content to be converted.
+             output_schema (Union[str, Callable]): The expected format of the
+                 response.
+
+         Returns:
+             dict: The converted content in JSON format.
+         """
+         import outlines
+
+         json_generator = outlines.generate.json(
+             self._outlines_model, output_schema
+         )
+         return json_generator(content)
+
+     def convert_pydantic(
+         self,
+         content: str,
+         output_schema: Type[BaseModel],
+     ) -> BaseModel:
+         r"""Convert the content to the specified Pydantic schema.
+
+         Args:
+             content (str): The content to be converted.
+             output_schema (Type[BaseModel]): The expected format of the
+                 response.
+
+         Returns:
+             BaseModel: The converted content in pydantic model format.
+         """
+         import outlines
+
+         json_generator = outlines.generate.json(
+             self._outlines_model, output_schema
+         )
+         return json_generator(content)
+
+     def convert_type(self, content: str, type_name: type) -> str:
+         r"""Convert the content to the specified type.
+
+         The following types are currently available:
+             1. int
+             2. float
+             3. bool
+             4. datetime.date
+             5. datetime.time
+             6. datetime.datetime
+             7. custom types (https://dottxt-ai.github.io/outlines/latest/reference/generation/types/)
+
+         Args:
+             content (str): The content to be converted.
+             type_name (type): The type to be used.
+
+         Returns:
+             str: The converted content.
+         """
+         import outlines
+
+         type_generator = outlines.generate.format(
+             self._outlines_model, type_name
+         )
+         return type_generator(content)
+
+     def convert_choice(self, content: str, choices: List[str]) -> str:
+         r"""Convert the content to one of the specified choices.
+
+         Args:
+             content (str): The content to be converted.
+             choices (List[str]): The choices to be used.
+
+         Returns:
+             str: The converted content.
+         """
+         import outlines
+
+         choices_generator = outlines.generate.choice(
+             self._outlines_model, choices
+         )
+         return choices_generator(content)
+
+     def convert_grammar(self, content: str, grammar: str) -> str:
+         r"""Convert the content to conform to the specified grammar.
+
+         Args:
+             content (str): The content to be converted.
+             grammar (str): The grammar to be used.
+
+         Returns:
+             str: The converted content.
+         """
+         import outlines
+
+         grammar_generator = outlines.generate.cfg(
+             self._outlines_model, grammar
+         )
+         return grammar_generator(content)
+
+     def convert(  # type: ignore[override]
+         self,
+         content: str,
+         type: Literal[
+             "regex", "pydantic", "json", "type", "choice", "grammar"
+         ],
+         **kwargs,
+     ) -> Any:
+         r"""Formats the input content into the expected structured output.
+
+         Args:
+             content (str): The content to be formatted.
+             type (Literal["regex", "pydantic", "json", "type", "choice",
+                 "grammar"]): The type of conversion to perform. Options are:
+                 - "regex": Match the content against a regex pattern.
+                 - "pydantic": Convert the content into a pydantic model.
+                 - "json": Convert the content into a JSON object based on a
+                   schema.
+                 - "type": Convert the content into a specified type.
+                 - "choice": Match the content against a list of valid
+                   choices.
+                 - "grammar": Convert the content using a specified grammar.
+             **kwargs: Additional keyword arguments specific to the conversion
+                 type.
+
+                 - For "regex":
+                     regex_pattern (str): The regex pattern to use for
+                         matching.
+
+                 - For "pydantic":
+                     output_schema (Type[BaseModel]): The schema to validate
+                         and format the pydantic model.
+
+                 - For "json":
+                     output_schema (Union[str, Callable]): The schema to
+                         validate and format the JSON object.
+
+                 - For "type":
+                     type_name (type): The target type for the conversion.
+
+                 - For "choice":
+                     choices (List[str]): A list of valid choices to match
+                         against.
+
+                 - For "grammar":
+                     grammar (str): The grammar definition to use for content
+                         conversion.
+         """
+         match type:
+             case "regex":
+                 return self.convert_regex(content, kwargs.get("regex_pattern"))  # type: ignore[arg-type]
+             case "pydantic":
+                 return self.convert_pydantic(
+                     content, kwargs.get("output_schema")  # type: ignore[arg-type]
+                 )
+             case "json":
+                 return self.convert_json(content, kwargs.get("output_schema"))  # type: ignore[arg-type]
+             case "type":
+                 return self.convert_type(content, kwargs.get("type_name"))  # type: ignore[arg-type]
+             case "choice":
+                 return self.convert_choice(content, kwargs.get("choices"))  # type: ignore[arg-type]
+             case "grammar":
+                 return self.convert_grammar(content, kwargs.get("grammar"))  # type: ignore[arg-type]
+             case _:
+                 raise ValueError("Unsupported output schema type")
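
For orientation, a minimal usage sketch of the new converter (not part of the diff). The import path and model id are assumptions: the class ships next to `BaseConverter`, so `camel.schemas` is the likely home, and any transformers-compatible model id would work.

```python
from pydantic import BaseModel

from camel.schemas import OutlinesConverter  # import path assumed


class Weather(BaseModel):  # illustrative schema
    city: str
    temperature_celsius: float


# Illustrative model id; the transformers backend downloads it on first use.
converter = OutlinesConverter(
    model_type="microsoft/Phi-3-mini-4k-instruct",
    platform="transformers",
)

# Constrained decoding guarantees the output parses into the schema.
weather = converter.convert(
    "It is 21.5 degrees in Paris today.",
    type="pydantic",
    output_schema=Weather,
)
print(weather)  # a Weather instance, e.g. city='Paris', temperature_celsius=21.5
```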
@@ -509,8 +509,8 @@ class RolePlaying:
          # step and once in role play), and the model generates only one
          # response when multi-response support is enabled.
          if (
-             'n' in self.user_agent.model_config_dict.keys()
-             and self.user_agent.model_config_dict['n'] > 1
+             'n' in self.user_agent.model_backend.model_config_dict.keys()
+             and self.user_agent.model_backend.model_config_dict['n'] > 1
          ):
              self.user_agent.record_message(user_msg)
 
@@ -532,8 +532,8 @@ class RolePlaying:
          # step and once in role play), and the model generates only one
          # response when multi-response support is enabled.
          if (
-             'n' in self.assistant_agent.model_config_dict.keys()
-             and self.assistant_agent.model_config_dict['n'] > 1
+             'n' in self.assistant_agent.model_backend.model_config_dict.keys()
+             and self.assistant_agent.model_backend.model_config_dict['n'] > 1
          ):
              self.assistant_agent.record_message(assistant_msg)
 
@@ -251,7 +251,7 @@ class Workforce(BaseNode):
              additional_info = "A Workforce node"
          elif isinstance(child, SingleAgentWorker):
              additional_info = "tools: " + (
-                 ", ".join(child.worker.func_dict.keys())
+                 ", ".join(child.worker.tool_dict.keys())
              )
          elif isinstance(child, RolePlayingWorker):
              additional_info = "A Role playing node"
@@ -369,7 +369,7 @@ class Workforce(BaseNode):
              model_config_dict=model_config_dict,
          )
 
-         return ChatAgent(worker_sys_msg, model=model, tools=function_list)
+         return ChatAgent(worker_sys_msg, model=model, tools=function_list)  # type: ignore[arg-type]
 
      async def _get_returned_task(self) -> Task:
          r"""Get the task that's published by this node and just get returned
@@ -12,9 +12,20 @@
  # limitations under the License.
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
+ import logging
+ import re
  import time
  from typing import TYPE_CHECKING, Any, Dict, List, Tuple
 
+ from camel.storages.graph_storages.base import BaseGraphStorage
+ from camel.storages.graph_storages.graph_element import (
+     GraphElement,
+ )
+ from camel.utils.commons import dependencies_required
+
+ logger = logging.getLogger(__name__)
+
+
  if TYPE_CHECKING:
      from nebula3.data.ResultSet import (  # type: ignore[import-untyped]
          ResultSet,
@@ -24,11 +35,6 @@ if TYPE_CHECKING:
          Session,
      )
 
- from camel.storages.graph_storages.base import BaseGraphStorage
- from camel.storages.graph_storages.graph_element import (
-     GraphElement,
- )
- from camel.utils.commons import dependencies_required
 
  MAX_RETRIES = 5
  RETRY_DELAY = 3
@@ -178,11 +184,21 @@ class NebulaGraph(BaseGraphStorage):
          """
          nodes = self._extract_nodes(graph_elements)
          for node in nodes:
-             self.add_node(node['id'], node['type'])
+             try:
+                 self.add_node(node['id'], node['type'])
+             except Exception as e:
+                 logger.warning(f"Failed to add node {node}. Error: {e}")
+                 continue
 
          relationships = self._extract_relationships(graph_elements)
          for rel in relationships:
-             self.add_triplet(rel['subj']['id'], rel['obj']['id'], rel['type'])
+             try:
+                 self.add_triplet(
+                     rel['subj']['id'], rel['obj']['id'], rel['type']
+                 )
+             except Exception as e:
+                 logger.warning(f"Failed to add relationship {rel}. Error: {e}")
+                 continue
 
      def ensure_edge_type_exists(
          self,
@@ -253,6 +269,9 @@ class NebulaGraph(BaseGraphStorage):
              node_id (str): The ID of the node.
              tag_name (str): The tag name of the node.
          """
+         node_id = re.sub(r'[^a-zA-Z0-9\u4e00-\u9fa5]', '', node_id)
+         tag_name = re.sub(r'[^a-zA-Z0-9\u4e00-\u9fa5]', '', tag_name)
+
          self.ensure_tag_exists(tag_name)
 
          # Insert node without properties
@@ -409,6 +428,10 @@ class NebulaGraph(BaseGraphStorage):
              obj (str): The identifier for the object entity.
              rel (str): The relationship between the subject and object.
          """
+         subj = re.sub(r'[^a-zA-Z0-9\u4e00-\u9fa5]', '', subj)
+         obj = re.sub(r'[^a-zA-Z0-9\u4e00-\u9fa5]', '', obj)
+         rel = re.sub(r'[^a-zA-Z0-9\u4e00-\u9fa5]', '', rel)
+
          self.ensure_tag_exists(subj)
          self.ensure_tag_exists(obj)
          self.ensure_edge_type_exists(rel)
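
The new `re.sub` calls strip node and edge identifiers down to ASCII alphanumerics plus the common CJK block (U+4E00 to U+9FA5) before they are embedded in nGQL statements, so spaces, quotes, and punctuation can no longer break the generated queries. A standalone sketch of the same sanitization:

```python
import re

def sanitize(identifier: str) -> str:
    # Same pattern as the add_node/add_triplet hunks above: keep only
    # ASCII letters/digits and common CJK characters, drop everything else.
    return re.sub(r'[^a-zA-Z0-9\u4e00-\u9fa5]', '', identifier)

print(sanitize("user-profile v2!"))  # userprofilev2
print(sanitize("北京 地铁"))          # 北京地铁
```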
camel/types/enums.py CHANGED
@@ -34,6 +34,7 @@ class ModelType(UnifiedModelType, Enum):
      GPT_4_TURBO = "gpt-4-turbo"
      GPT_4O = "gpt-4o"
      GPT_4O_MINI = "gpt-4o-mini"
+     O1 = "o1"
      O1_PREVIEW = "o1-preview"
      O1_MINI = "o1-mini"
 
@@ -153,9 +154,15 @@ class ModelType(UnifiedModelType, Enum):
              return self.value
          return "gpt-4o-mini"
 
+     @property
+     def support_native_structured_output(self) -> bool:
+         return self.is_openai
+
      @property
      def support_native_tool_calling(self) -> bool:
-         return any([self.is_openai, self.is_gemini, self.is_mistral])
+         return any(
+             [self.is_openai, self.is_gemini, self.is_mistral, self.is_qwen]
+         )
 
      @property
      def is_openai(self) -> bool:
@@ -166,6 +173,7 @@ class ModelType(UnifiedModelType, Enum):
          ModelType.GPT_4_TURBO,
          ModelType.GPT_4O,
          ModelType.GPT_4O_MINI,
+         ModelType.O1,
          ModelType.O1_PREVIEW,
          ModelType.O1_MINI,
      }
@@ -452,6 +460,7 @@ class ModelType(UnifiedModelType, Enum):
          }:
              return 131_072
          elif self in {
+             ModelType.O1,
              ModelType.CLAUDE_2_1,
              ModelType.CLAUDE_3_OPUS,
              ModelType.CLAUDE_3_SONNET,
@@ -113,6 +113,11 @@ class UnifiedModelType(str):
          r"""Returns whether the model is a Qwen model."""
          return True
 
+     @property
+     def support_native_structured_output(self) -> bool:
+         r"""Returns whether the model supports native structured output."""
+         return False
+
      @property
      def support_native_tool_calling(self) -> bool:
          r"""Returns whether the model supports native tool calling."""
camel/utils/__init__.py CHANGED
@@ -21,6 +21,7 @@ from .commons import (
      dependencies_required,
      download_tasks,
      func_string_to_callable,
+     generate_prompt_for_structured_output,
      get_first_int,
      get_prompt_template_key_words,
      get_pydantic_major_version,
@@ -78,4 +79,5 @@ __all__ = [
      "track_agent",
      "handle_http_error",
      "get_pydantic_model",
+     "generate_prompt_for_structured_output",
  ]
camel/utils/commons.py CHANGED
@@ -12,7 +12,6 @@
  # limitations under the License.
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
  import importlib
- import logging
  import os
  import platform
  import re
@@ -40,19 +39,14 @@ import pydantic
  import requests
  from pydantic import BaseModel
 
- from camel.logger import get_logger
  from camel.types import TaskType
 
  from .constants import Constants
 
  F = TypeVar('F', bound=Callable[..., Any])
 
- logger = get_logger(__name__)
 
-
- def print_text_animated(
-     text, delay: float = 0.02, end: str = "", log_level: int = logging.INFO
- ):
+ def print_text_animated(text, delay: float = 0.02, end: str = ""):
      r"""Prints the given text with an animated effect.
 
      Args:
@@ -61,22 +55,10 @@ def print_text_animated(
              (default: :obj:`0.02`)
          end (str, optional): The end character to print after each
              character of text. (default: :obj:`""`)
-         log_level (int, optional): The log level to use.
-             See https://docs.python.org/3/library/logging.html#levels
-             (default: :obj:`logging.INFO`)
      """
-     if logger.isEnabledFor(log_level):
-         # timestamp and other prefixes
-         logger.log(log_level, '')
-
-         for char in text:
-             print(char, end=end, flush=True)
-             time.sleep(delay)
-         # Close the log entry
-         logger.log(log_level, '')
-     else:
-         # This may be relevant for logging frameworks
-         logger.log(log_level, text)
+     for char in text:
+         print(char, end=end, flush=True)
+         time.sleep(delay)
 
 
  def get_prompt_template_key_words(template: str) -> Set[str]:
@@ -406,7 +388,8 @@ def json_to_function_code(json_obj: Dict) -> str:
      }
 
      for prop in required:
-         description = properties[prop]['description']
+         # Fall back to an empty string when no description is provided
+         description = properties[prop].get('description', "")
          prop_type = properties[prop]['type']
          python_type = prop_to_python.get(prop_type, prop_type)
          args.append(f"{prop}: {python_type}")
@@ -624,3 +607,39 @@ def retry_request(
              time.sleep(delay)
          else:
              raise
+
+
+ def generate_prompt_for_structured_output(
+     response_format: Optional[Type[BaseModel]],
+     user_message: str,
+ ) -> str:
+     r"""Generates a prompt based on the provided Pydantic model and
+     user message.
+
+     Args:
+         response_format (Optional[Type[BaseModel]]): The Pydantic model
+             class, or None to return the user message unchanged.
+         user_message (str): The user message to be used in the prompt.
+
+     Returns:
+         str: A prompt string for the LLM.
+     """
+     if response_format is None:
+         return user_message
+
+     json_schema = response_format.model_json_schema()
+     sys_prompt = (
+         "Given the user message, please generate a JSON response adhering "
+         "to the following JSON schema:\n"
+         f"{json_schema}\n"
+         "Make sure the JSON response is valid and matches the EXACT structure "
+         "defined in the schema. Your result should only be a valid json "
+         "object, without any other text or comments.\n"
+     )
+     user_prompt = f"User message: {user_message}\n"
+
+     final_prompt = f"""
+     {sys_prompt}
+     {user_prompt}
+     """
+     return final_prompt
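
A short usage sketch for the newly added helper; the `Movie` model is illustrative and not part of the library:

```python
from pydantic import BaseModel

from camel.utils import generate_prompt_for_structured_output


class Movie(BaseModel):  # illustrative schema
    title: str
    year: int


prompt = generate_prompt_for_structured_output(
    Movie, "Recommend a classic sci-fi film."
)
print(prompt)  # JSON-schema instructions followed by "User message: ..."

# With response_format=None the user message passes through unchanged.
assert generate_prompt_for_structured_output(None, "hi") == "hi"
```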
@@ -63,6 +63,7 @@ def get_model_encoding(value_for_tiktoken: str):
          encoding = tiktoken.encoding_for_model(value_for_tiktoken)
      except KeyError:
          if value_for_tiktoken in [
+             ModelType.O1.value,
              ModelType.O1_MINI.value,
              ModelType.O1_PREVIEW.value,
          ]:
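
This extends the `KeyError` fallback to the new `o1` model id for tiktoken versions that do not know the name yet. The surrounding pattern looks roughly like this; the `o200k_base` choice is an assumption, since the fallback branch itself is outside the hunk:

```python
import tiktoken

model_name = "o1"
try:
    encoding = tiktoken.encoding_for_model(model_name)
except KeyError:
    # Assumed fallback: o1-family models share the gpt-4o tokenizer
    # family, so o200k_base is a reasonable stand-in.
    encoding = tiktoken.get_encoding("o200k_base")
```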
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: camel-ai
- Version: 0.2.13
+ Version: 0.2.15a0
  Summary: Communicative Agents for AI Society Study
  Home-page: https://www.camel-ai.org/
  License: Apache-2.0
@@ -48,7 +48,7 @@ Requires-Dist: discord.py (>=2.3.2,<3.0.0) ; extra == "tools" or extra == "all"
  Requires-Dist: docker (>=7.1.0,<8.0.0) ; extra == "tools" or extra == "runtime" or extra == "all"
  Requires-Dist: docstring-parser (>=0.15,<0.16)
  Requires-Dist: docx2txt (>=0.8,<0.9) ; extra == "tools" or extra == "all"
- Requires-Dist: duckduckgo-search (>=6.2.12,<7.0.0) ; extra == "search-tools" or extra == "tools" or extra == "all"
+ Requires-Dist: duckduckgo-search (>=6.3.5,<7.0.0) ; extra == "search-tools" or extra == "tools" or extra == "all"
  Requires-Dist: e2b-code-interpreter (>=1.0.3,<2.0.0) ; extra == "tools" or extra == "all"
  Requires-Dist: eval-type-backport (==0.2.0)
  Requires-Dist: ffmpeg-python (>=0.2.0,<0.3.0) ; extra == "tools" or extra == "all"
@@ -68,12 +68,12 @@ Requires-Dist: mock (>=5,<6) ; extra == "test"
  Requires-Dist: nebula3-python (==3.8.2) ; extra == "rag" or extra == "graph-storages" or extra == "all"
  Requires-Dist: neo4j (>=5.18.0,<6.0.0) ; extra == "rag" or extra == "graph-storages" or extra == "all"
  Requires-Dist: newspaper3k (>=0.2.8,<0.3.0) ; extra == "tools" or extra == "all"
- Requires-Dist: nltk (==3.9.1) ; extra == "tools" or extra == "all"
  Requires-Dist: notion-client (>=2.2.1,<3.0.0) ; extra == "tools" or extra == "all"
  Requires-Dist: numpy (>=1,<2)
- Requires-Dist: openai (>=1.45.0,<2.0.0)
+ Requires-Dist: openai (>=1.58.1,<2.0.0)
  Requires-Dist: openapi-spec-validator (>=0.7.1,<0.8.0) ; extra == "tools" or extra == "all"
  Requires-Dist: opencv-python (>=4,<5) ; extra == "huggingface-agent" or extra == "all"
+ Requires-Dist: outlines (>=0.1.7,<0.2.0) ; extra == "tools" or extra == "all"
  Requires-Dist: pandoc
  Requires-Dist: pathlib (>=1.0.1,<2.0.0)
  Requires-Dist: pdfplumber (>=0.11.0,<0.12.0) ; extra == "tools" or extra == "all"
@@ -109,7 +109,7 @@ Requires-Dist: tiktoken (>=0.7.0,<0.8.0)
  Requires-Dist: torch (==2.2.1) ; (platform_system == "Darwin" and platform_machine != "arm64") and (extra == "huggingface-agent" or extra == "all")
  Requires-Dist: torch (>=2,<3) ; (platform_system != "Darwin" or platform_machine == "arm64") and (extra == "huggingface-agent" or extra == "all")
  Requires-Dist: transformers (>=4,<5) ; extra == "huggingface-agent" or extra == "all"
- Requires-Dist: unstructured[all-docs] (==0.16.11) ; extra == "rag" or extra == "all"
+ Requires-Dist: unstructured[all-docs] (==0.16.11) ; extra == "rag" or extra == "tools" or extra == "all" or extra == "all"
  Requires-Dist: wikipedia (>=1,<2) ; extra == "search-tools" or extra == "tools" or extra == "all"
  Requires-Dist: wolframalpha (>=5.0.0,<6.0.0) ; extra == "search-tools" or extra == "tools" or extra == "all"
  Requires-Dist: yt-dlp (>=2024.11.4,<2025.0.0) ; extra == "tools" or extra == "all"
@@ -263,7 +263,7 @@ conda create --name camel python=3.10
  conda activate camel
 
  # Clone github repo
- git clone -b v0.2.13 https://github.com/camel-ai/camel.git
+ git clone -b v0.2.15a0 https://github.com/camel-ai/camel.git
 
  # Change directory into project directory
  cd camel