camel-ai 0.2.36__py3-none-any.whl → 0.2.37__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their public registries. It is provided for informational purposes only.

Potentially problematic release.


This version of camel-ai might be problematic.

Files changed (40)
  1. camel/__init__.py +1 -1
  2. camel/agents/__init__.py +2 -0
  3. camel/agents/repo_agent.py +579 -0
  4. camel/configs/aiml_config.py +20 -19
  5. camel/configs/anthropic_config.py +25 -27
  6. camel/configs/cohere_config.py +11 -10
  7. camel/configs/deepseek_config.py +16 -16
  8. camel/configs/gemini_config.py +8 -8
  9. camel/configs/groq_config.py +18 -19
  10. camel/configs/internlm_config.py +8 -8
  11. camel/configs/litellm_config.py +26 -24
  12. camel/configs/mistral_config.py +8 -8
  13. camel/configs/moonshot_config.py +11 -11
  14. camel/configs/nvidia_config.py +13 -13
  15. camel/configs/ollama_config.py +14 -15
  16. camel/configs/openai_config.py +3 -3
  17. camel/configs/openrouter_config.py +9 -9
  18. camel/configs/qwen_config.py +8 -8
  19. camel/configs/reka_config.py +12 -11
  20. camel/configs/samba_config.py +14 -14
  21. camel/configs/sglang_config.py +15 -16
  22. camel/configs/siliconflow_config.py +18 -17
  23. camel/configs/togetherai_config.py +18 -19
  24. camel/configs/vllm_config.py +18 -19
  25. camel/configs/yi_config.py +7 -8
  26. camel/configs/zhipuai_config.py +8 -9
  27. camel/datasets/static_dataset.py +25 -23
  28. camel/environments/models.py +3 -0
  29. camel/environments/single_step.py +222 -136
  30. camel/extractors/__init__.py +16 -1
  31. camel/toolkits/__init__.py +2 -0
  32. camel/toolkits/thinking_toolkit.py +74 -0
  33. camel/types/enums.py +3 -0
  34. camel/utils/chunker/code_chunker.py +9 -15
  35. camel/verifiers/base.py +28 -5
  36. camel/verifiers/python_verifier.py +313 -68
  37. {camel_ai-0.2.36.dist-info → camel_ai-0.2.37.dist-info}/METADATA +52 -5
  38. {camel_ai-0.2.36.dist-info → camel_ai-0.2.37.dist-info}/RECORD +40 -38
  39. {camel_ai-0.2.36.dist-info → camel_ai-0.2.37.dist-info}/WHEEL +0 -0
  40. {camel_ai-0.2.36.dist-info → camel_ai-0.2.37.dist-info}/licenses/LICENSE +0 -0
camel/extractors/__init__.py CHANGED
@@ -12,5 +12,20 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from .base import BaseExtractor, BaseExtractorStrategy
+from .python_strategies import (
+    BoxedStrategy,
+    PythonDictStrategy,
+    PythonListStrategy,
+    PythonSetStrategy,
+    PythonTupleStrategy,
+)

-__all__ = ["BaseExtractor", "BaseExtractorStrategy"]
+__all__ = [
+    "BaseExtractor",
+    "BaseExtractorStrategy",
+    "BoxedStrategy",
+    "PythonListStrategy",
+    "PythonDictStrategy",
+    "PythonSetStrategy",
+    "PythonTupleStrategy",
+]
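
The extractor package now re-exports the Python answer-extraction strategies. A minimal usage sketch follows; only the exported names and the async setup()/extract()/cleanup() lifecycle (visible in the verifier changes further down) come from this diff, while the pipeline-style BaseExtractor constructor and the no-argument strategy constructors are assumptions.

import asyncio

from camel.extractors import BaseExtractor, BoxedStrategy, PythonListStrategy


async def demo() -> None:
    # Assumed constructor: a pipeline of strategy stages (list of lists).
    extractor = BaseExtractor(pipeline=[[BoxedStrategy(), PythonListStrategy()]])
    await extractor.setup()
    # Pull a Python list out of a \boxed{...}-wrapped model response.
    answer = await extractor.extract(r"Final answer: \boxed{[1, 2, 3]}")
    print(answer)
    await extractor.cleanup()


asyncio.run(demo())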
camel/toolkits/__init__.py CHANGED
@@ -60,6 +60,7 @@ from .browser_toolkit import BrowserToolkit
 from .file_write_toolkit import FileWriteToolkit
 from .terminal_toolkit import TerminalToolkit
 from .pubmed_toolkit import PubMedToolkit
+from .thinking_toolkit import ThinkingToolkit

 __all__ = [
     'BaseToolkit',
@@ -108,4 +109,5 @@ __all__ = [
     'FileWriteToolkit',
     'TerminalToolkit',
     'PubMedToolkit',
+    'ThinkingToolkit',
 ]
camel/toolkits/thinking_toolkit.py ADDED
@@ -0,0 +1,74 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+from typing import List, Optional
+
+from camel.logger import get_logger
+from camel.toolkits import FunctionTool
+from camel.toolkits.base import BaseToolkit
+
+logger = get_logger(__name__)
+
+
+class ThinkingToolkit(BaseToolkit):
+    r"""A toolkit for recording thoughts during reasoning processes.
+
+    Attributes:
+        thoughts (List[str]): A list to store the recorded thoughts.
+    """
+
+    def __init__(
+        self,
+        timeout: Optional[float] = None,
+    ):
+        r"""Initialize the ThinkingToolkit.
+
+        Args:
+            timeout (Optional[float]): The timeout for the toolkit.
+                (default: :obj:`None`)
+        """
+        super().__init__(timeout=timeout)
+        self.thoughts: List[str] = []
+
+    def think(self, thought: str) -> str:
+        r"""Use the tool to think about something.
+        It will not obtain new information or change the database, but just
+        append the thought to the log. Use it when complex reasoning or some
+        cache memory is needed.
+
+        Args:
+            thought (str): A thought to think about.
+
+        Returns:
+            str: The full log of thoughts including the new thought.
+        """
+        try:
+            logger.debug(f"Thought: {thought}")
+            self.thoughts.append(thought)
+
+            thoughts = "\n".join([f"- {t}" for t in self.thoughts])
+            return f"Thoughts:\n{thoughts}"
+
+        except Exception as e:
+            error_msg = f"Error recording thought: {e}"
+            logger.error(error_msg)
+            return error_msg
+
+    def get_tools(self) -> List[FunctionTool]:
+        r"""Get all tools in the toolkit.
+
+        Returns:
+            List[FunctionTool]: A list of tools.
+        """
+        return [FunctionTool(self.think)]
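
A short usage sketch of the new toolkit, grounded in the file above: think() appends to an in-memory log and returns the accumulated thoughts, and get_tools() exposes think() as a FunctionTool for agents (the toolkit is also re-exported from camel.toolkits, per the __init__.py change).

from camel.toolkits import ThinkingToolkit

toolkit = ThinkingToolkit()
toolkit.think("Break the task into sub-steps.")
log = toolkit.think("Check edge cases before answering.")
print(log)
# Thoughts:
# - Break the task into sub-steps.
# - Check edge cases before answering.

tools = toolkit.get_tools()  # a single FunctionTool wrapping think()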
camel/types/enums.py CHANGED
@@ -123,6 +123,7 @@ class ModelType(UnifiedModelType, Enum):
     NVIDIA_LLAMA3_3_70B_INSTRUCT = "meta/llama-3.3-70b-instruct"

     # Gemini models
+    GEMINI_2_5_PRO_EXP = "gemini-2.5-pro-exp-03-25"
     GEMINI_2_0_FLASH = "gemini-2.0-flash-exp"
     GEMINI_2_0_FLASH_THINKING = "gemini-2.0-flash-thinking-exp"
     GEMINI_2_0_PRO_EXP = "gemini-2.0-pro-exp-02-05"
@@ -420,6 +421,7 @@ class ModelType(UnifiedModelType, Enum):
             bool: Whether this type of models is gemini.
         """
         return self in {
+            ModelType.GEMINI_2_5_PRO_EXP,
             ModelType.GEMINI_2_0_FLASH,
             ModelType.GEMINI_1_5_FLASH,
             ModelType.GEMINI_1_5_PRO,
@@ -724,6 +726,7 @@ class ModelType(UnifiedModelType, Enum):
         }:
             return 256_000
         elif self in {
+            ModelType.GEMINI_2_5_PRO_EXP,
             ModelType.GEMINI_2_0_FLASH,
             ModelType.GEMINI_1_5_FLASH,
             ModelType.GEMINI_1_5_PRO,
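
For reference, the new enum member is used like any other Gemini entry; the is_gemini accessor name below is assumed to be the existing property that the second hunk extends.

from camel.types import ModelType

model = ModelType.GEMINI_2_5_PRO_EXP
print(model.value)      # "gemini-2.5-pro-exp-03-25"
print(model.is_gemini)  # True, since the member is added to the Gemini set above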
camel/utils/chunker/code_chunker.py CHANGED
@@ -16,9 +16,7 @@ from typing import List, Optional

 from unstructured.documents.elements import Element, ElementMetadata

-from camel.messages import OpenAIUserMessage
-from camel.types import ModelType
-from camel.utils import BaseTokenCounter, OpenAITokenCounter
+from camel.utils import get_model_encoding

 from .base import BaseChunker

@@ -38,20 +36,18 @@ class CodeChunker(BaseChunker):
             token counting, if `None`, OpenAITokenCounter will be used.
             (default: :obj:`None`)
         remove_image: (bool, optional): If the chunker should skip the images.
+        model_name (str, optional): The tokenizer model name used
+            for token counting. (default: :obj:`"cl100k_base"`)
     """

     def __init__(
         self,
         chunk_size: int = 8192,
-        token_counter: Optional[BaseTokenCounter] = None,
+        model_name: str = "cl100k_base",
         remove_image: Optional[bool] = True,
     ):
         self.chunk_size = chunk_size
-        self.token_counter = (
-            token_counter
-            if token_counter
-            else OpenAITokenCounter(model=ModelType.GPT_4O_MINI)
-        )
+        self.tokenizer = get_model_encoding(model_name)
         self.remove_image = remove_image
         self.struct_pattern = re.compile(
             r'^\s*(?:(def|class|function)\s+\w+|'
@@ -72,9 +68,7 @@ class CodeChunker(BaseChunker):
         Returns:
             int: The number of tokens in the input text.
         """
-        return self.token_counter.count_tokens_from_messages(
-            [OpenAIUserMessage(role="user", name="user", content=text)]
-        )
+        return len(self.tokenizer.encode(text, disallowed_special=()))

     def _split_oversized(self, line: str) -> List[str]:
         r"""Splits an oversized line into multiple chunks based on token limits
@@ -86,7 +80,7 @@ class CodeChunker(BaseChunker):
             List[str]: A list of smaller chunks after splitting the
                 oversized line.
         """
-        tokens = self.token_counter.encode(line)
+        tokens = self.tokenizer.encode(line, disallowed_special=())
         chunks = []
         buffer = []
         current_count = 0
@@ -96,12 +90,12 @@
             current_count += 1

             if current_count >= self.chunk_size:
-                chunks.append(self.token_counter.decode(buffer).strip())
+                chunks.append(self.tokenizer.decode(buffer).strip())
                 buffer = []
                 current_count = 0

         if buffer:
-            chunks.append(self.token_counter.decode(buffer))
+            chunks.append(self.tokenizer.decode(buffer))
         return chunks

     def chunk(self, content: List[str]) -> List[Element]:
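
A hedged sketch of the reworked CodeChunker: token counting now goes through get_model_encoding(model_name), a tiktoken-style encoding such as "cl100k_base", instead of an OpenAITokenCounter that built chat messages. The module import path follows the file location shown above; the input file name is hypothetical.

from camel.utils.chunker.code_chunker import CodeChunker

chunker = CodeChunker(chunk_size=4096, model_name="cl100k_base")

with open("example.py") as f:           # hypothetical source file
    elements = chunker.chunk(content=[f.read()])

for element in elements:                # unstructured Element objects
    print(len(element.text))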
camel/verifiers/base.py CHANGED
@@ -16,6 +16,7 @@ import time
 from abc import ABC, abstractmethod
 from typing import List, Optional

+from camel.extractors.base import BaseExtractor
 from camel.logger import get_logger
 from camel.utils import BatchProcessor

@@ -44,6 +45,7 @@ class BaseVerifier(ABC):

     def __init__(
         self,
+        extractor: Optional[BaseExtractor] = None,
         max_parallel: Optional[int] = None,
         timeout: Optional[float] = None,
         max_retries: int = 3,
@@ -72,6 +74,9 @@
                 down. (default: :obj:`85.0`)
             **kwargs: Additional verifier parameters.
         """
+
+        self.extractor = extractor
+
         self._is_setup: bool = False
         self._max_parallel: Optional[int] = max_parallel
         self._timeout: Optional[float] = timeout
@@ -82,7 +87,7 @@
         self._memory_threshold: float = memory_threshold
         self._batch_processor: BatchProcessor = BatchProcessor()

-    async def setup(self) -> None:
+    async def setup(self, **kwargs) -> None:
         r"""Set up the verifier with necessary resources.

         Initializes:
@@ -97,6 +102,8 @@
             return

         try:
+            if self.extractor:
+                await self.extractor.setup()
             batch_size = max(1, self._initial_batch_size or 10)
             max_parallel = max(1, self._max_parallel or 1)
             self._batch_processor = BatchProcessor()
@@ -106,7 +113,7 @@
                 f"batch_size={batch_size}, max_parallel={max_parallel}"
             )

-            await self._setup()
+            await self._setup(**kwargs)
             self._is_setup = True

         except Exception as e:
@@ -118,7 +125,7 @@
             raise RuntimeError(error_msg) from e

     @abstractmethod
-    async def _setup(self) -> None:
+    async def _setup(self, **kwargs) -> None:
         r"""Implement verifier-specific setup logic."""
         pass

@@ -136,6 +143,8 @@
             return

         try:
+            if self.extractor:
+                await self.extractor.cleanup()
             self._batch_processor = BatchProcessor()
             await self._cleanup()
             logger.info(f"{self.__class__.__name__} cleaned up successfully")
@@ -191,15 +200,28 @@
         start_time = time.time()

         while attempt < self._max_retries:
+            # Extract verifiable part of the proposed solution,
+            # if verifier has been initialized with extractor.
+            verifiable_solution = (
+                await self.extractor.extract(solution)
+                if self.extractor
+                else solution
+            )
+
+            if not verifiable_solution:
+                continue
+
             try:
                 verification_result = (
                     await asyncio.wait_for(
-                        self._verify_implementation(solution, ground_truth),
+                        self._verify_implementation(
+                            verifiable_solution, ground_truth
+                        ),
                         timeout=self._timeout,
                     )
                     if self._timeout
                     else await self._verify_implementation(
-                        solution, ground_truth
+                        verifiable_solution, ground_truth
                     )
                 )

@@ -267,6 +289,7 @@
             "Subclasses must implement _verify_implementation()"
         )

+    # TODO: check again
     async def verify_batch(
         self,
         solutions: List[str],
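
To illustrate the new hooks, a minimal subclass sketch is shown below. Only the optional extractor parameter, the **kwargs-aware _setup(), and the (solution, ground_truth) shape of _verify_implementation() are taken from this diff; the exact-match logic and its return value are simplifications for illustration, not the library's real result type.

from typing import Optional

from camel.verifiers.base import BaseVerifier


class ExactMatchVerifier(BaseVerifier):
    """Hypothetical verifier that compares the extracted solution verbatim."""

    async def _setup(self, **kwargs) -> None:
        pass  # no verifier-specific resources needed

    async def _cleanup(self) -> None:
        pass

    async def _verify_implementation(
        self, solution: str, ground_truth: Optional[str]
    ):
        # Simplified: the real implementation returns a structured result.
        return solution.strip() == (ground_truth or "").strip()


# verify()/verify_batch() (not fully shown above) now run the optional
# extractor over each solution before calling _verify_implementation(),
# and setup()/cleanup() also set up and tear down that extractor.
verifier = ExactMatchVerifier(extractor=None, max_retries=1)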