camel-ai 0.2.10__py3-none-any.whl → 0.2.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (36)
  1. camel/__init__.py +6 -1
  2. camel/agents/chat_agent.py +74 -5
  3. camel/agents/deductive_reasoner_agent.py +4 -1
  4. camel/configs/__init__.py +3 -0
  5. camel/configs/mistral_config.py +0 -3
  6. camel/configs/nvidia_config.py +70 -0
  7. camel/configs/vllm_config.py +10 -1
  8. camel/embeddings/vlm_embedding.py +4 -1
  9. camel/interpreters/docker_interpreter.py +7 -2
  10. camel/interpreters/subprocess_interpreter.py +7 -2
  11. camel/loaders/firecrawl_reader.py +0 -3
  12. camel/logger.py +112 -0
  13. camel/messages/__init__.py +1 -1
  14. camel/messages/base.py +10 -7
  15. camel/messages/conversion/__init__.py +3 -1
  16. camel/messages/conversion/alpaca.py +122 -0
  17. camel/models/__init__.py +5 -0
  18. camel/models/model_factory.py +3 -0
  19. camel/models/model_manager.py +212 -0
  20. camel/models/nvidia_model.py +141 -0
  21. camel/models/openai_model.py +1 -0
  22. camel/retrievers/vector_retriever.py +22 -5
  23. camel/societies/babyagi_playing.py +4 -1
  24. camel/toolkits/__init__.py +3 -0
  25. camel/toolkits/code_execution.py +38 -4
  26. camel/toolkits/human_toolkit.py +1 -0
  27. camel/toolkits/meshy_toolkit.py +185 -0
  28. camel/toolkits/twitter_toolkit.py +3 -0
  29. camel/types/enums.py +41 -8
  30. camel/utils/commons.py +22 -5
  31. camel/utils/token_counting.py +4 -1
  32. {camel_ai-0.2.10.dist-info → camel_ai-0.2.11.dist-info}/METADATA +2 -2
  33. {camel_ai-0.2.10.dist-info → camel_ai-0.2.11.dist-info}/RECORD +36 -30
  34. /camel/messages/conversion/{models.py → conversation_models.py} +0 -0
  35. {camel_ai-0.2.10.dist-info → camel_ai-0.2.11.dist-info}/LICENSE +0 -0
  36. {camel_ai-0.2.10.dist-info → camel_ai-0.2.11.dist-info}/WHEEL +0 -0
camel/toolkits/code_execution.py CHANGED
@@ -11,9 +11,14 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
- from typing import List, Literal, Optional
+ from typing import List, Literal, Optional, Union
 
- from camel.interpreters import InternalPythonInterpreter
+ from camel.interpreters import (
+     DockerInterpreter,
+     InternalPythonInterpreter,
+     JupyterKernelInterpreter,
+     SubprocessInterpreter,
+ )
  from camel.toolkits import FunctionTool
  from camel.toolkits.base import BaseToolkit
 
@@ -29,26 +34,55 @@ class CodeExecutionToolkit(BaseToolkit):
              by `eval()` without any security check. (default: :obj:`False`)
          import_white_list (Optional[List[str]]): A list of allowed imports.
              (default: :obj:`None`)
+         require_confirm (bool): Whether to require confirmation before
+             executing code. (default: :obj:`False`)
      """
 
      def __init__(
          self,
          sandbox: Literal[
-             "internal_python", "jupyter", "docker"
+             "internal_python", "jupyter", "docker", "subprocess"
          ] = "internal_python",
          verbose: bool = False,
          unsafe_mode: bool = False,
          import_white_list: Optional[List[str]] = None,
+         require_confirm: bool = False,
      ) -> None:
-         # TODO: Add support for docker and jupyter.
          self.verbose = verbose
          self.unsafe_mode = unsafe_mode
          self.import_white_list = import_white_list or list()
+
+         # Type annotation for interpreter to allow all possible types
+         self.interpreter: Union[
+             InternalPythonInterpreter,
+             JupyterKernelInterpreter,
+             DockerInterpreter,
+             SubprocessInterpreter,
+         ]
+
          if sandbox == "internal_python":
              self.interpreter = InternalPythonInterpreter(
                  unsafe_mode=self.unsafe_mode,
                  import_white_list=self.import_white_list,
              )
+         elif sandbox == "jupyter":
+             self.interpreter = JupyterKernelInterpreter(
+                 require_confirm=require_confirm,
+                 print_stdout=self.verbose,
+                 print_stderr=self.verbose,
+             )
+         elif sandbox == "docker":
+             self.interpreter = DockerInterpreter(
+                 require_confirm=require_confirm,
+                 print_stdout=self.verbose,
+                 print_stderr=self.verbose,
+             )
+         elif sandbox == "subprocess":
+             self.interpreter = SubprocessInterpreter(
+                 require_confirm=require_confirm,
+                 print_stdout=self.verbose,
+                 print_stderr=self.verbose,
+             )
          else:
              raise RuntimeError(
                  f"The sandbox type `{sandbox}` is not supported."
camel/toolkits/human_toolkit.py CHANGED
@@ -36,6 +36,7 @@ class HumanToolkit(BaseToolkit):
          Returns:
              str: The answer from the human.
          """
+         print(f"Question: {question}")
          logger.info(f"Question: {question}")
          reply = input("Your reply: ")
          logger.info(f"User reply: {reply}")
camel/toolkits/meshy_toolkit.py ADDED
@@ -0,0 +1,185 @@
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+ import os
+ from typing import Any, Dict
+
+ import requests
+
+ from camel.toolkits.base import BaseToolkit
+ from camel.utils import api_keys_required
+
+
+ class MeshyToolkit(BaseToolkit):
+     r"""A class representing a toolkit for 3D model generation using Meshy.
+
+     This class provides methods that handle text/image to 3D model
+     generation using Meshy.
+
+     Call the generate_3d_model_complete method to generate a refined 3D model.
+
+     Ref:
+         https://docs.meshy.ai/api-text-to-3d-beta#create-a-text-to-3d-preview-task
+     """
+
+     @api_keys_required("MESHY_API_KEY")
+     def __init__(self):
+         r"""Initializes the MeshyToolkit with the API key from the
+         environment.
+         """
+         self.api_key = os.getenv('MESHY_API_KEY')
+
+     def generate_3d_preview(
+         self, prompt: str, art_style: str, negative_prompt: str
+     ) -> Dict[str, Any]:
+         r"""Generates a 3D preview using the Meshy API.
+
+         Args:
+             prompt (str): Description of the object.
+             art_style (str): Art style for the 3D model.
+             negative_prompt (str): What the model should not look like.
+
+         Returns:
+             Dict[str, Any]: The result property of the response contains the
+                 task id of the newly created Text to 3D task.
+         """
+         payload = {
+             "mode": "preview",
+             "prompt": prompt,
+             "art_style": art_style,
+             "negative_prompt": negative_prompt,
+         }
+         headers = {"Authorization": f"Bearer {self.api_key}"}
+
+         response = requests.post(
+             "https://api.meshy.ai/v2/text-to-3d",
+             headers=headers,
+             json=payload,
+         )
+         response.raise_for_status()
+         return response.json()
+
+     def refine_3d_model(self, preview_task_id: str) -> Dict[str, Any]:
+         r"""Refines a 3D model using the Meshy API.
+
+         Args:
+             preview_task_id (str): The task ID of the preview to refine.
+
+         Returns:
+             Dict[str, Any]: The response from the Meshy API.
+         """
+         payload = {"mode": "refine", "preview_task_id": preview_task_id}
+         headers = {"Authorization": f"Bearer {self.api_key}"}
+
+         response = requests.post(
+             "https://api.meshy.ai/v2/text-to-3d",
+             headers=headers,
+             json=payload,
+         )
+         response.raise_for_status()
+         return response.json()
+
+     def get_task_status(self, task_id: str) -> Dict[str, Any]:
+         r"""Retrieves the status or result of a specific 3D model generation
+         task using the Meshy API.
+
+         Args:
+             task_id (str): The ID of the task to retrieve.
+
+         Returns:
+             Dict[str, Any]: The response from the Meshy API.
+         """
+         headers = {"Authorization": f"Bearer {self.api_key}"}
+
+         response = requests.get(
+             f"https://api.meshy.ai/v2/text-to-3d/{task_id}",
+             headers=headers,
+         )
+         response.raise_for_status()
+         return response.json()
+
+     def wait_for_task_completion(
+         self, task_id: str, polling_interval: int = 10, timeout: int = 3600
+     ) -> Dict[str, Any]:
+         r"""Waits for a task to complete by polling its status.
+
+         Args:
+             task_id (str): The ID of the task to monitor.
+             polling_interval (int): Seconds to wait between status checks.
+                 (default: :obj:`10`)
+             timeout (int): Maximum seconds to wait before timing out.
+                 (default: :obj:`3600`)
+
+         Returns:
+             Dict[str, Any]: Final response from the API when task completes.
+
+         Raises:
+             TimeoutError: If task doesn't complete within timeout period.
+             RuntimeError: If task fails or is canceled.
+         """
+         import time
+
+         start_time = time.time()
+
+         while True:
+             if time.time() - start_time > timeout:
+                 raise TimeoutError(
+                     f"Task {task_id} timed out after {timeout} seconds"
+                 )
+
+             response = self.get_task_status(task_id)
+             status = response.get("status")  # Direct access to status field
+             elapsed = int(time.time() - start_time)
+
+             print(f"Status after {elapsed}s: {status}")
+
+             if status == "SUCCEEDED":
+                 return response
+             elif status in [
+                 "FAILED",
+                 "CANCELED",
+             ]:
+                 raise RuntimeError(f"Task {task_id} {status}")
+
+             time.sleep(polling_interval)
+
+     def generate_3d_model_complete(
+         self, prompt: str, art_style: str, negative_prompt: str
+     ) -> Dict[str, Any]:
+         r"""Generates a complete 3D model by handling preview and refinement
+         stages.
+
+         Args:
+             prompt (str): Description of the object.
+             art_style (str): Art style for the 3D model.
+             negative_prompt (str): What the model should not look like.
+
+         Returns:
+             Dict[str, Any]: The final refined 3D model response.
+         """
+         # Generate preview
+         preview_response = self.generate_3d_preview(
+             prompt, art_style, negative_prompt
+         )
+         preview_task_id = str(preview_response.get("result"))
+
+         # Wait for preview completion
+         self.wait_for_task_completion(preview_task_id)
+
+         # Start refinement
+         refine_response = self.refine_3d_model(preview_task_id)
+         refine_task_id = str(refine_response.get("result"))
+
+         # Wait for refinement completion and return final result
+         return self.wait_for_task_completion(refine_task_id)
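A hypothetical end-to-end call for the new toolkit: it assumes a valid MESHY_API_KEY in the environment, and the "realistic" art style and "model_urls" response field follow the Meshy API docs rather than anything guaranteed by this diff.

# Hedged sketch: full text-to-3D flow via the new MeshyToolkit.
# Assumes MESHY_API_KEY is set; "realistic" and "model_urls" come from
# the Meshy API docs and are not confirmed by this diff.
from camel.toolkits.meshy_toolkit import MeshyToolkit

toolkit = MeshyToolkit()
final = toolkit.generate_3d_model_complete(
    prompt="A small wooden rowboat",
    art_style="realistic",
    negative_prompt="low quality, low poly",
)
print(final.get("model_urls"))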
camel/toolkits/twitter_toolkit.py CHANGED
@@ -20,12 +20,15 @@ from typing import Any, Dict, List, Optional, Union
  import requests
  from requests_oauthlib import OAuth1
 
+ from camel.logger import get_logger
  from camel.toolkits import FunctionTool
  from camel.toolkits.base import BaseToolkit
  from camel.utils import api_keys_required
 
  TWEET_TEXT_LIMIT = 280
 
+ logger = get_logger(__name__)
+
 
  @api_keys_required(
      "TWITTER_CONSUMER_KEY",
camel/types/enums.py CHANGED
@@ -65,7 +65,17 @@ class ModelType(UnifiedModelType, Enum):
      CLAUDE_3_5_SONNET = "claude-3-5-sonnet-20240620"
 
      # Nvidia models
-     NEMOTRON_4_REWARD = "nvidia/nemotron-4-340b-reward"
+     NVIDIA_NEMOTRON_340B_INSTRUCT = "nvidia/nemotron-4-340b-instruct"
+     NVIDIA_NEMOTRON_340B_REWARD = "nvidia/nemotron-4-340b-reward"
+     NVIDIA_YI_LARGE = "01-ai/yi-large"
+     NVIDIA_MISTRAL_LARGE = "mistralai/mistral-large"
+     NVIDIA_MIXTRAL_8X7B = "mistralai/mixtral-8x7b-instruct"
+     NVIDIA_LLAMA3_70B = "meta/llama3-70b"
+     NVIDIA_LLAMA3_1_8B_INSTRUCT = "meta/llama-3.1-8b-instruct"
+     NVIDIA_LLAMA3_1_70B_INSTRUCT = "meta/llama-3.1-70b-instruct"
+     NVIDIA_LLAMA3_1_405B_INSTRUCT = "meta/llama-3.1-405b-instruct"
+     NVIDIA_LLAMA3_2_1B_INSTRUCT = "meta/llama-3.2-1b-instruct"
+     NVIDIA_LLAMA3_2_3B_INSTRUCT = "meta/llama-3.2-3b-instruct"
 
      # Gemini models
      GEMINI_1_5_FLASH = "gemini-1.5-flash"
@@ -165,6 +175,7 @@ class ModelType(UnifiedModelType, Enum):
          ModelType.GPT_4,
          ModelType.GPT_4_TURBO,
          ModelType.GPT_4O,
+         ModelType.GPT_4O_MINI,
      }
 
      @property
@@ -225,13 +236,19 @@
 
      @property
      def is_nvidia(self) -> bool:
-         r"""Returns whether this type of models is Nvidia-released model.
-
-         Returns:
-             bool: Whether this type of models is nvidia.
-         """
+         r"""Returns whether this type of models is a NVIDIA model."""
          return self in {
-             ModelType.NEMOTRON_4_REWARD,
+             ModelType.NVIDIA_NEMOTRON_340B_INSTRUCT,
+             ModelType.NVIDIA_NEMOTRON_340B_REWARD,
+             ModelType.NVIDIA_YI_LARGE,
+             ModelType.NVIDIA_MISTRAL_LARGE,
+             ModelType.NVIDIA_LLAMA3_70B,
+             ModelType.NVIDIA_MIXTRAL_8X7B,
+             ModelType.NVIDIA_LLAMA3_1_8B_INSTRUCT,
+             ModelType.NVIDIA_LLAMA3_1_70B_INSTRUCT,
+             ModelType.NVIDIA_LLAMA3_1_405B_INSTRUCT,
+             ModelType.NVIDIA_LLAMA3_2_1B_INSTRUCT,
+             ModelType.NVIDIA_LLAMA3_2_3B_INSTRUCT,
          }
 
      @property
@@ -329,7 +346,6 @@
          if self is ModelType.GLM_4V:
              return 1024
          elif self in {
-             ModelType.NEMOTRON_4_REWARD,
              ModelType.STUB,
              ModelType.REKA_CORE,
              ModelType.REKA_EDGE,
@@ -338,6 +354,8 @@
              ModelType.QWEN_MATH_TURBO,
              ModelType.COHERE_COMMAND,
              ModelType.COHERE_COMMAND_LIGHT,
+             ModelType.NVIDIA_NEMOTRON_340B_INSTRUCT,
+             ModelType.NVIDIA_NEMOTRON_340B_REWARD,
          }:
              return 4_096
          elif self in {
@@ -349,6 +367,7 @@
              ModelType.GLM_3_TURBO,
              ModelType.GLM_4,
              ModelType.QWEN_VL_PLUS,
+             ModelType.NVIDIA_LLAMA3_70B,
          }:
              return 8_192
          elif self in {
@@ -370,6 +389,9 @@
              ModelType.YI_LARGE_FC,
              ModelType.QWEN_MAX,
              ModelType.QWEN_VL_MAX,
+             ModelType.NVIDIA_YI_LARGE,
+             ModelType.NVIDIA_MISTRAL_LARGE,
+             ModelType.NVIDIA_MIXTRAL_8X7B,
              ModelType.QWEN_QWQ_32B,
          }:
              return 32_768
@@ -401,6 +423,11 @@
              ModelType.COHERE_COMMAND_R,
              ModelType.COHERE_COMMAND_R_PLUS,
              ModelType.COHERE_COMMAND_NIGHTLY,
+             ModelType.NVIDIA_LLAMA3_1_8B_INSTRUCT,
+             ModelType.NVIDIA_LLAMA3_1_70B_INSTRUCT,
+             ModelType.NVIDIA_LLAMA3_1_405B_INSTRUCT,
+             ModelType.NVIDIA_LLAMA3_2_1B_INSTRUCT,
+             ModelType.NVIDIA_LLAMA3_2_3B_INSTRUCT,
          }:
              return 128_000
          elif self in {
@@ -582,6 +609,7 @@ class ModelPlatformType(Enum):
      COHERE = "cohere"
      YI = "lingyiwanwu"
      QWEN = "tongyi-qianwen"
+     NVIDIA = "nvidia"
      DEEPSEEK = "deepseek"
 
      @property
@@ -670,6 +698,11 @@
          r"""Returns whether this platform is Qwen."""
          return self is ModelPlatformType.QWEN
 
+     @property
+     def is_nvidia(self) -> bool:
+         r"""Returns whether this platform is Nvidia."""
+         return self is ModelPlatformType.NVIDIA
+
      @property
      def is_deepseek(self) -> bool:
          r"""Returns whether this platform is DeepSeek."""
camel/utils/commons.py CHANGED
@@ -12,6 +12,7 @@
  # limitations under the License.
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
  import importlib
+ import logging
  import os
  import platform
  import re
@@ -39,14 +40,19 @@ import pydantic
  import requests
  from pydantic import BaseModel
 
+ from camel.logger import get_logger
  from camel.types import TaskType
 
  from .constants import Constants
 
  F = TypeVar('F', bound=Callable[..., Any])
 
+ logger = get_logger(__name__)
 
- def print_text_animated(text, delay: float = 0.02, end: str = ""):
+
+ def print_text_animated(
+     text, delay: float = 0.02, end: str = "", log_level: int = logging.INFO
+ ):
@@ -55,11 +61,22 @@ def print_text_animated(text, delay: float = 0.02, end: str = ""):
              (default: :obj:`0.02`)
          end (str, optional): The end character to print after each
              character of text. (default: :obj:`""`)
+         log_level (int, optional): The log level to use.
+             See https://docs.python.org/3/library/logging.html#levels
+             (default: :obj:`logging.INFO`)
      """
-     for char in text:
-         print(char, end=end, flush=True)
-         time.sleep(delay)
-     print('\n')
+     if logger.isEnabledFor(log_level):
+         # timestamp and other prefixes
+         logger.log(log_level, '')
+
+         for char in text:
+             print(char, end=end, flush=True)
+             time.sleep(delay)
+         # Close the log entry
+         logger.log(log_level, '')
+     else:
+         # This may be relevant for logging frameworks
+         logger.log(log_level, text)
 
 
  def get_prompt_template_key_words(template: str) -> Set[str]:
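The net effect is that the animation is now gated on the logger's effective level, so callers can silence it globally. A small sketch of the new parameter, assuming the camel logger defers to the root logging configuration:

# Hedged sketch of the new log_level parameter.
import logging
from camel.utils import print_text_animated

logging.basicConfig(level=logging.INFO)
# INFO-level call: animates character by character, as before.
print_text_animated("Hello, CAMEL!", delay=0.01)
# DEBUG-level call: isEnabledFor(DEBUG) is False at INFO, so the text
# is routed to logger.log(DEBUG, ...) and filtered out entirely.
print_text_animated("hidden", log_level=logging.DEBUG)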
camel/utils/token_counting.py CHANGED
@@ -22,6 +22,7 @@ from typing import TYPE_CHECKING, List, Optional
 
  from PIL import Image
 
+ from camel.logger import get_logger
  from camel.types import (
      ModelType,
      OpenAIImageType,
@@ -44,6 +45,8 @@ SQUARE_PIXELS = 512
  SQUARE_TOKENS = 170
  EXTRA_TOKENS = 85
 
+ logger = get_logger(__name__)
+
 
  def get_model_encoding(value_for_tiktoken: str):
      r"""Get model encoding from tiktoken.
@@ -65,7 +68,7 @@ def get_model_encoding(value_for_tiktoken: str):
      ]:
          encoding = tiktoken.get_encoding("o200k_base")
      else:
-         print("Model not found. Using cl100k_base encoding.")
+         logger.info("Model not found. Using cl100k_base encoding.")
          encoding = tiktoken.get_encoding("cl100k_base")
      return encoding
 
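Beyond token_counting, the same get_logger pattern now appears in twitter_toolkit.py and commons.py, so output that used to be print calls is controllable through standard logging configuration. A sketch of surfacing it, assuming the new camel/logger.py namespaces its loggers under "camel" (as get_logger(__name__) suggests) and does not override the root configuration:

# Hedged sketch: surfacing the messages that used to be print calls.
import logging

logging.basicConfig(format="%(asctime)s %(name)s: %(message)s")
logging.getLogger("camel").setLevel(logging.INFO)

from camel.utils.token_counting import get_model_encoding
get_model_encoding("unknown-model")  # logs the cl100k_base fallback notice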
{camel_ai-0.2.10.dist-info → camel_ai-0.2.11.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: camel-ai
- Version: 0.2.10
+ Version: 0.2.11
  Summary: Communicative Agents for AI Society Study
  Home-page: https://www.camel-ai.org/
  License: Apache-2.0
@@ -257,7 +257,7 @@ conda create --name camel python=3.10
  conda activate camel
 
  # Clone github repo
- git clone -b v0.2.10 https://github.com/camel-ai/camel.git
+ git clone -b v0.2.11 https://github.com/camel-ai/camel.git
 
  # Change directory into project directory
  cd camel