camel-ai 0.2.36__py3-none-any.whl → 0.2.38__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai has been flagged as potentially problematic; see the registry's advisory page for this release for more details.

Files changed (84):
  1. camel/__init__.py +1 -1
  2. camel/agents/__init__.py +2 -0
  3. camel/agents/repo_agent.py +579 -0
  4. camel/configs/aiml_config.py +20 -19
  5. camel/configs/anthropic_config.py +25 -27
  6. camel/configs/cohere_config.py +11 -10
  7. camel/configs/deepseek_config.py +16 -16
  8. camel/configs/gemini_config.py +8 -8
  9. camel/configs/groq_config.py +18 -19
  10. camel/configs/internlm_config.py +8 -8
  11. camel/configs/litellm_config.py +26 -24
  12. camel/configs/mistral_config.py +8 -8
  13. camel/configs/moonshot_config.py +11 -11
  14. camel/configs/nvidia_config.py +13 -13
  15. camel/configs/ollama_config.py +14 -15
  16. camel/configs/openai_config.py +3 -3
  17. camel/configs/openrouter_config.py +9 -9
  18. camel/configs/qwen_config.py +8 -8
  19. camel/configs/reka_config.py +12 -11
  20. camel/configs/samba_config.py +14 -14
  21. camel/configs/sglang_config.py +15 -16
  22. camel/configs/siliconflow_config.py +18 -17
  23. camel/configs/togetherai_config.py +18 -19
  24. camel/configs/vllm_config.py +18 -19
  25. camel/configs/yi_config.py +7 -8
  26. camel/configs/zhipuai_config.py +8 -9
  27. camel/datagen/evol_instruct/__init__.py +20 -0
  28. camel/datagen/evol_instruct/evol_instruct.py +424 -0
  29. camel/datagen/evol_instruct/scorer.py +166 -0
  30. camel/datagen/evol_instruct/templates.py +268 -0
  31. camel/datasets/static_dataset.py +25 -23
  32. camel/environments/models.py +10 -1
  33. camel/environments/single_step.py +296 -136
  34. camel/extractors/__init__.py +16 -1
  35. camel/interpreters/docker_interpreter.py +1 -1
  36. camel/interpreters/e2b_interpreter.py +1 -1
  37. camel/interpreters/subprocess_interpreter.py +1 -1
  38. camel/loaders/__init__.py +2 -2
  39. camel/loaders/{panda_reader.py → pandas_reader.py} +61 -30
  40. camel/memories/context_creators/score_based.py +198 -67
  41. camel/models/aiml_model.py +9 -3
  42. camel/models/anthropic_model.py +11 -3
  43. camel/models/azure_openai_model.py +9 -3
  44. camel/models/base_audio_model.py +6 -0
  45. camel/models/base_model.py +4 -0
  46. camel/models/deepseek_model.py +9 -3
  47. camel/models/gemini_model.py +9 -3
  48. camel/models/groq_model.py +9 -3
  49. camel/models/internlm_model.py +8 -2
  50. camel/models/model_factory.py +4 -0
  51. camel/models/moonshot_model.py +8 -2
  52. camel/models/nemotron_model.py +9 -3
  53. camel/models/nvidia_model.py +9 -3
  54. camel/models/ollama_model.py +9 -3
  55. camel/models/openai_audio_models.py +5 -3
  56. camel/models/openai_compatible_model.py +9 -3
  57. camel/models/openai_model.py +9 -3
  58. camel/models/openrouter_model.py +9 -3
  59. camel/models/qwen_model.py +9 -3
  60. camel/models/samba_model.py +9 -3
  61. camel/models/sglang_model.py +11 -4
  62. camel/models/siliconflow_model.py +8 -2
  63. camel/models/stub_model.py +2 -1
  64. camel/models/togetherai_model.py +9 -3
  65. camel/models/vllm_model.py +9 -3
  66. camel/models/yi_model.py +9 -3
  67. camel/models/zhipuai_model.py +9 -3
  68. camel/retrievers/auto_retriever.py +14 -0
  69. camel/storages/__init__.py +2 -0
  70. camel/storages/vectordb_storages/__init__.py +2 -0
  71. camel/storages/vectordb_storages/tidb.py +332 -0
  72. camel/toolkits/__init__.py +7 -0
  73. camel/toolkits/browser_toolkit.py +84 -61
  74. camel/toolkits/openai_agent_toolkit.py +131 -0
  75. camel/toolkits/searxng_toolkit.py +207 -0
  76. camel/toolkits/thinking_toolkit.py +230 -0
  77. camel/types/enums.py +4 -0
  78. camel/utils/chunker/code_chunker.py +9 -15
  79. camel/verifiers/base.py +28 -5
  80. camel/verifiers/python_verifier.py +321 -68
  81. {camel_ai-0.2.36.dist-info → camel_ai-0.2.38.dist-info}/METADATA +103 -8
  82. {camel_ai-0.2.36.dist-info → camel_ai-0.2.38.dist-info}/RECORD +84 -75
  83. {camel_ai-0.2.36.dist-info → camel_ai-0.2.38.dist-info}/WHEEL +0 -0
  84. {camel_ai-0.2.36.dist-info → camel_ai-0.2.38.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,230 @@
1
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
+
15
+ from typing import List, Optional
16
+
17
+ from camel.logger import get_logger
18
+ from camel.toolkits import FunctionTool
19
+ from camel.toolkits.base import BaseToolkit
20
+
21
+ logger = get_logger(__name__)
22
+
23
+
24
+ class ThinkingToolkit(BaseToolkit):
25
+ r"""A toolkit for recording thoughts during reasoning processes."""
26
+
27
+ def __init__(
28
+ self,
29
+ timeout: Optional[float] = None,
30
+ ):
31
+ r"""Initialize the ThinkingToolkit.
32
+
33
+ Args:
34
+ timeout (Optional[float]): The timeout for the toolkit.
35
+ (default: :obj: `None`)
36
+ """
37
+ super().__init__(timeout=timeout)
38
+ self.plans: List[str] = []
39
+ self.hypotheses: List[str] = []
40
+ self.thoughts: List[str] = []
41
+ self.contemplations: List[str] = []
42
+ self.critiques: List[str] = []
43
+ self.syntheses: List[str] = []
44
+ self.reflections: List[str] = []
45
+
46
+ def plan(self, plan: str) -> str:
47
+ r"""Use the tool to create a plan or strategy.
48
+ This tool is for outlining the approach or steps to be taken before
49
+ starting the actual thinking process.
50
+
51
+ Args:
52
+ plan (str): A forward-looking plan or strategy.
53
+
54
+ Returns:
55
+ str: The recorded plan.
56
+ """
57
+ try:
58
+ logger.debug(f"Plan: {plan}")
59
+ self.plans.append(plan)
60
+ return f"Plan: {plan}"
61
+
62
+ except Exception as e:
63
+ error_msg = f"Error recording plan: {e}"
64
+ logger.error(error_msg)
65
+ return error_msg
66
+
67
+ def hypothesize(self, hypothesis: str) -> str:
68
+ r"""Use the tool to form a hypothesis or make a prediction.
69
+ This tool is for making educated guesses or predictions based on
70
+ the plan, before detailed thinking.
71
+
72
+ Args:
73
+ hypothesis (str): A hypothesis or prediction to test.
74
+
75
+ Returns:
76
+ str: The recorded hypothesis.
77
+ """
78
+ try:
79
+ logger.debug(f"Hypothesis: {hypothesis}")
80
+ if not self.plans:
81
+ return "Consider creating a plan before forming hypotheses."
82
+ self.hypotheses.append(hypothesis)
83
+ return f"Hypothesis: {hypothesis}"
84
+
85
+ except Exception as e:
86
+ error_msg = f"Error recording hypothesis: {e}"
87
+ logger.error(error_msg)
88
+ return error_msg
89
+
90
+ def think(self, thought: str) -> str:
91
+ r"""Use the tool to think about something.
92
+ It will not obtain new information or change the database, but just
93
+ append the thought to the log. Use it for initial thoughts and
94
+ observations during the execution of the plan.
95
+
96
+ Args:
97
+ thought (str): A thought to think about.
98
+
99
+ Returns:
100
+ str: The recorded thought.
101
+ """
102
+ try:
103
+ logger.debug(f"Thought: {thought}")
104
+ if not self.plans:
105
+ return (
106
+ "Consider creating a plan before thinking "
107
+ "through the process."
108
+ )
109
+ self.thoughts.append(thought)
110
+ return f"Thought: {thought}"
111
+
112
+ except Exception as e:
113
+ error_msg = f"Error recording thought: {e}"
114
+ logger.error(error_msg)
115
+ return error_msg
116
+
117
+ def contemplate(self, contemplation: str) -> str:
118
+ r"""Use the tool to deeply contemplate an idea or concept.
119
+ This tool is for deeper, more thorough exploration of thoughts,
120
+ considering multiple perspectives and implications. It's more
121
+ comprehensive than basic thinking but more focused than reflection.
122
+
123
+ Args:
124
+ contemplation (str): A deeper exploration of thoughts or concepts.
125
+
126
+ Returns:
127
+ str: The recorded contemplation.
128
+ """
129
+ try:
130
+ logger.debug(f"Contemplation: {contemplation}")
131
+ if not self.thoughts:
132
+ return (
133
+ "Consider thinking about the topic before "
134
+ "deep contemplation."
135
+ )
136
+ self.contemplations.append(contemplation)
137
+ return f"Contemplation: {contemplation}"
138
+
139
+ except Exception as e:
140
+ error_msg = f"Error recording contemplation: {e}"
141
+ logger.error(error_msg)
142
+ return error_msg
143
+
144
+ def critique(self, critique: str) -> str:
145
+ r"""Use the tool to critically evaluate current thoughts.
146
+ This tool is for identifying potential flaws, biases, or
147
+ weaknesses in the current thinking process.
148
+
149
+ Args:
150
+ critique (str): A critical evaluation of current thoughts.
151
+
152
+ Returns:
153
+ str: The recorded critique.
154
+ """
155
+ try:
156
+ logger.debug(f"Critique: {critique}")
157
+ if not self.contemplations:
158
+ return "Consider contemplating deeply before critiquing."
159
+ self.critiques.append(critique)
160
+ return f"Critique: {critique}"
161
+
162
+ except Exception as e:
163
+ error_msg = f"Error recording critique: {e}"
164
+ logger.error(error_msg)
165
+ return error_msg
166
+
167
+ def synthesize(self, synthesis: str) -> str:
168
+ r"""Use the tool to combine and integrate various thoughts.
169
+ This tool is for bringing together different thoughts, contemplations,
170
+ and critiques into a coherent understanding.
171
+
172
+ Args:
173
+ synthesis (str): An integration of multiple thoughts and insights.
174
+
175
+ Returns:
176
+ str: The recorded synthesis.
177
+ """
178
+ try:
179
+ logger.debug(f"Synthesis: {synthesis}")
180
+ if not self.critiques:
181
+ return "Consider critiquing thoughts before synthesizing."
182
+ self.syntheses.append(synthesis)
183
+ return f"Synthesis: {synthesis}"
184
+
185
+ except Exception as e:
186
+ error_msg = f"Error recording synthesis: {e}"
187
+ logger.error(error_msg)
188
+ return error_msg
189
+
190
+ def reflect(self, reflection: str) -> str:
191
+ r"""Use the tool to reflect on the entire process.
192
+ This tool is for final evaluation of the entire thinking process,
193
+ including plans, hypotheses, thoughts, contemplations, critiques,
194
+ and syntheses.
195
+
196
+ Args:
197
+ reflection (str): A comprehensive reflection on the process.
198
+
199
+ Returns:
200
+ str: The recorded reflection.
201
+ """
202
+ try:
203
+ logger.debug(f"Reflection: {reflection}")
204
+ if not self.syntheses:
205
+ return (
206
+ "Consider synthesizing insights before final reflection."
207
+ )
208
+ self.reflections.append(reflection)
209
+ return f"Reflection: {reflection}"
210
+
211
+ except Exception as e:
212
+ error_msg = f"Error recording reflection: {e}"
213
+ logger.error(error_msg)
214
+ return error_msg
215
+
216
+ def get_tools(self) -> List[FunctionTool]:
217
+ r"""Get all tools in the toolkit.
218
+
219
+ Returns:
220
+ List[FunctionTool]: A list of tools.
221
+ """
222
+ return [
223
+ FunctionTool(self.plan),
224
+ FunctionTool(self.hypothesize),
225
+ FunctionTool(self.think),
226
+ FunctionTool(self.contemplate),
227
+ FunctionTool(self.critique),
228
+ FunctionTool(self.synthesize),
229
+ FunctionTool(self.reflect),
230
+ ]
camel/types/enums.py CHANGED
@@ -123,6 +123,7 @@ class ModelType(UnifiedModelType, Enum):
123
123
  NVIDIA_LLAMA3_3_70B_INSTRUCT = "meta/llama-3.3-70b-instruct"
124
124
 
125
125
  # Gemini models
126
+ GEMINI_2_5_PRO_EXP = "gemini-2.5-pro-exp-03-25"
126
127
  GEMINI_2_0_FLASH = "gemini-2.0-flash-exp"
127
128
  GEMINI_2_0_FLASH_THINKING = "gemini-2.0-flash-thinking-exp"
128
129
  GEMINI_2_0_PRO_EXP = "gemini-2.0-pro-exp-02-05"
@@ -420,6 +421,7 @@ class ModelType(UnifiedModelType, Enum):
420
421
  bool: Whether this type of models is gemini.
421
422
  """
422
423
  return self in {
424
+ ModelType.GEMINI_2_5_PRO_EXP,
423
425
  ModelType.GEMINI_2_0_FLASH,
424
426
  ModelType.GEMINI_1_5_FLASH,
425
427
  ModelType.GEMINI_1_5_PRO,
@@ -724,6 +726,7 @@ class ModelType(UnifiedModelType, Enum):
724
726
  }:
725
727
  return 256_000
726
728
  elif self in {
729
+ ModelType.GEMINI_2_5_PRO_EXP,
727
730
  ModelType.GEMINI_2_0_FLASH,
728
731
  ModelType.GEMINI_1_5_FLASH,
729
732
  ModelType.GEMINI_1_5_PRO,
@@ -879,6 +882,7 @@ class OpenAIVisionDetailType(Enum):
879
882
  class StorageType(Enum):
880
883
  MILVUS = "milvus"
881
884
  QDRANT = "qdrant"
885
+ TIDB = "tidb"
882
886
 
883
887
 
884
888
  class OpenAPIName(Enum):
@@ -16,9 +16,7 @@ from typing import List, Optional
16
16
 
17
17
  from unstructured.documents.elements import Element, ElementMetadata
18
18
 
19
- from camel.messages import OpenAIUserMessage
20
- from camel.types import ModelType
21
- from camel.utils import BaseTokenCounter, OpenAITokenCounter
19
+ from camel.utils import get_model_encoding
22
20
 
23
21
  from .base import BaseChunker
24
22
 
@@ -38,20 +36,18 @@ class CodeChunker(BaseChunker):
38
36
  token counting, if `None`, OpenAITokenCounter will be used.
39
37
  (default: :obj:`None`)
40
38
  remove_image: (bool, optional): If the chunker should skip the images.
39
+ model_name (str, optional): The tokenizer model name used
40
+ for token counting. (default: :obj:`"cl100k_base"`)
41
41
  """
42
42
 
43
43
  def __init__(
44
44
  self,
45
45
  chunk_size: int = 8192,
46
- token_counter: Optional[BaseTokenCounter] = None,
46
+ model_name: str = "cl100k_base",
47
47
  remove_image: Optional[bool] = True,
48
48
  ):
49
49
  self.chunk_size = chunk_size
50
- self.token_counter = (
51
- token_counter
52
- if token_counter
53
- else OpenAITokenCounter(model=ModelType.GPT_4O_MINI)
54
- )
50
+ self.tokenizer = get_model_encoding(model_name)
55
51
  self.remove_image = remove_image
56
52
  self.struct_pattern = re.compile(
57
53
  r'^\s*(?:(def|class|function)\s+\w+|'
@@ -72,9 +68,7 @@ class CodeChunker(BaseChunker):
72
68
  Returns:
73
69
  int: The number of tokens in the input text.
74
70
  """
75
- return self.token_counter.count_tokens_from_messages(
76
- [OpenAIUserMessage(role="user", name="user", content=text)]
77
- )
71
+ return len(self.tokenizer.encode(text, disallowed_special=()))
78
72
 
79
73
  def _split_oversized(self, line: str) -> List[str]:
80
74
  r"""Splits an oversized line into multiple chunks based on token limits
@@ -86,7 +80,7 @@ class CodeChunker(BaseChunker):
86
80
  List[str]: A list of smaller chunks after splitting the
87
81
  oversized line.
88
82
  """
89
- tokens = self.token_counter.encode(line)
83
+ tokens = self.tokenizer.encode(line, disallowed_special=())
90
84
  chunks = []
91
85
  buffer = []
92
86
  current_count = 0
@@ -96,12 +90,12 @@ class CodeChunker(BaseChunker):
96
90
  current_count += 1
97
91
 
98
92
  if current_count >= self.chunk_size:
99
- chunks.append(self.token_counter.decode(buffer).strip())
93
+ chunks.append(self.tokenizer.decode(buffer).strip())
100
94
  buffer = []
101
95
  current_count = 0
102
96
 
103
97
  if buffer:
104
- chunks.append(self.token_counter.decode(buffer))
98
+ chunks.append(self.tokenizer.decode(buffer))
105
99
  return chunks
106
100
 
107
101
  def chunk(self, content: List[str]) -> List[Element]:
camel/verifiers/base.py CHANGED
@@ -16,6 +16,7 @@ import time
16
16
  from abc import ABC, abstractmethod
17
17
  from typing import List, Optional
18
18
 
19
+ from camel.extractors.base import BaseExtractor
19
20
  from camel.logger import get_logger
20
21
  from camel.utils import BatchProcessor
21
22
 
@@ -44,6 +45,7 @@ class BaseVerifier(ABC):
44
45
 
45
46
  def __init__(
46
47
  self,
48
+ extractor: Optional[BaseExtractor] = None,
47
49
  max_parallel: Optional[int] = None,
48
50
  timeout: Optional[float] = None,
49
51
  max_retries: int = 3,
@@ -72,6 +74,9 @@ class BaseVerifier(ABC):
72
74
  down. (default: :obj:`85.0`)
73
75
  **kwargs: Additional verifier parameters.
74
76
  """
77
+
78
+ self.extractor = extractor
79
+
75
80
  self._is_setup: bool = False
76
81
  self._max_parallel: Optional[int] = max_parallel
77
82
  self._timeout: Optional[float] = timeout
@@ -82,7 +87,7 @@ class BaseVerifier(ABC):
82
87
  self._memory_threshold: float = memory_threshold
83
88
  self._batch_processor: BatchProcessor = BatchProcessor()
84
89
 
85
- async def setup(self) -> None:
90
+ async def setup(self, **kwargs) -> None:
86
91
  r"""Set up the verifier with necessary resources.
87
92
 
88
93
  Initializes:
@@ -97,6 +102,8 @@ class BaseVerifier(ABC):
97
102
  return
98
103
 
99
104
  try:
105
+ if self.extractor:
106
+ await self.extractor.setup()
100
107
  batch_size = max(1, self._initial_batch_size or 10)
101
108
  max_parallel = max(1, self._max_parallel or 1)
102
109
  self._batch_processor = BatchProcessor()
@@ -106,7 +113,7 @@ class BaseVerifier(ABC):
106
113
  f"batch_size={batch_size}, max_parallel={max_parallel}"
107
114
  )
108
115
 
109
- await self._setup()
116
+ await self._setup(**kwargs)
110
117
  self._is_setup = True
111
118
 
112
119
  except Exception as e:
@@ -118,7 +125,7 @@ class BaseVerifier(ABC):
118
125
  raise RuntimeError(error_msg) from e
119
126
 
120
127
  @abstractmethod
121
- async def _setup(self) -> None:
128
+ async def _setup(self, **kwargs) -> None:
122
129
  r"""Implement verifier-specific setup logic."""
123
130
  pass
124
131
 
@@ -136,6 +143,8 @@ class BaseVerifier(ABC):
136
143
  return
137
144
 
138
145
  try:
146
+ if self.extractor:
147
+ await self.extractor.cleanup()
139
148
  self._batch_processor = BatchProcessor()
140
149
  await self._cleanup()
141
150
  logger.info(f"{self.__class__.__name__} cleaned up successfully")
@@ -191,15 +200,28 @@ class BaseVerifier(ABC):
191
200
  start_time = time.time()
192
201
 
193
202
  while attempt < self._max_retries:
203
+ # Extract verifiable part of the proposed solution,
204
+ # if verifier has been initialized with extractor.
205
+ verifiable_solution = (
206
+ await self.extractor.extract(solution)
207
+ if self.extractor
208
+ else solution
209
+ )
210
+
211
+ if not verifiable_solution:
212
+ continue
213
+
194
214
  try:
195
215
  verification_result = (
196
216
  await asyncio.wait_for(
197
- self._verify_implementation(solution, ground_truth),
217
+ self._verify_implementation(
218
+ verifiable_solution, ground_truth
219
+ ),
198
220
  timeout=self._timeout,
199
221
  )
200
222
  if self._timeout
201
223
  else await self._verify_implementation(
202
- solution, ground_truth
224
+ verifiable_solution, ground_truth
203
225
  )
204
226
  )
205
227
 
@@ -267,6 +289,7 @@ class BaseVerifier(ABC):
267
289
  "Subclasses must implement _verify_implementation()"
268
290
  )
269
291
 
292
+ # TODO: check again
270
293
  async def verify_batch(
271
294
  self,
272
295
  solutions: List[str],