camel-ai 0.2.35__py3-none-any.whl → 0.2.37__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (59)
  1. camel/__init__.py +1 -1
  2. camel/agents/__init__.py +2 -0
  3. camel/agents/repo_agent.py +579 -0
  4. camel/configs/__init__.py +3 -0
  5. camel/configs/aiml_config.py +20 -19
  6. camel/configs/anthropic_config.py +25 -27
  7. camel/configs/cohere_config.py +11 -10
  8. camel/configs/deepseek_config.py +16 -16
  9. camel/configs/gemini_config.py +8 -8
  10. camel/configs/groq_config.py +18 -19
  11. camel/configs/internlm_config.py +8 -8
  12. camel/configs/litellm_config.py +26 -24
  13. camel/configs/mistral_config.py +8 -8
  14. camel/configs/moonshot_config.py +11 -11
  15. camel/configs/nvidia_config.py +13 -13
  16. camel/configs/ollama_config.py +14 -15
  17. camel/configs/openai_config.py +3 -3
  18. camel/configs/openrouter_config.py +106 -0
  19. camel/configs/qwen_config.py +8 -8
  20. camel/configs/reka_config.py +12 -11
  21. camel/configs/samba_config.py +14 -14
  22. camel/configs/sglang_config.py +15 -16
  23. camel/configs/siliconflow_config.py +18 -17
  24. camel/configs/togetherai_config.py +18 -19
  25. camel/configs/vllm_config.py +18 -19
  26. camel/configs/yi_config.py +7 -8
  27. camel/configs/zhipuai_config.py +8 -9
  28. camel/datasets/few_shot_generator.py +2 -5
  29. camel/datasets/static_dataset.py +25 -23
  30. camel/environments/models.py +3 -0
  31. camel/environments/single_step.py +212 -132
  32. camel/extractors/__init__.py +16 -1
  33. camel/memories/agent_memories.py +2 -1
  34. camel/memories/blocks/chat_history_block.py +2 -1
  35. camel/models/__init__.py +2 -0
  36. camel/models/gemini_model.py +36 -0
  37. camel/models/groq_model.py +6 -3
  38. camel/models/model_factory.py +3 -0
  39. camel/models/openrouter_model.py +204 -0
  40. camel/storages/__init__.py +2 -0
  41. camel/storages/key_value_storages/__init__.py +2 -0
  42. camel/storages/key_value_storages/mem0_cloud.py +224 -0
  43. camel/storages/vectordb_storages/qdrant.py +3 -3
  44. camel/toolkits/__init__.py +2 -0
  45. camel/toolkits/browser_toolkit.py +43 -0
  46. camel/toolkits/code_execution.py +2 -1
  47. camel/toolkits/mcp_toolkit.py +30 -1
  48. camel/toolkits/thinking_toolkit.py +74 -0
  49. camel/types/enums.py +27 -0
  50. camel/types/unified_model_type.py +5 -0
  51. camel/utils/chunker/code_chunker.py +9 -15
  52. camel/verifiers/__init__.py +1 -2
  53. camel/verifiers/base.py +159 -99
  54. camel/verifiers/models.py +0 -12
  55. camel/verifiers/python_verifier.py +316 -60
  56. {camel_ai-0.2.35.dist-info → camel_ai-0.2.37.dist-info}/METADATA +54 -5
  57. {camel_ai-0.2.35.dist-info → camel_ai-0.2.37.dist-info}/RECORD +59 -54
  58. {camel_ai-0.2.35.dist-info → camel_ai-0.2.37.dist-info}/WHEEL +0 -0
  59. {camel_ai-0.2.35.dist-info → camel_ai-0.2.37.dist-info}/licenses/LICENSE +0 -0
camel/configs/vllm_config.py

@@ -18,7 +18,6 @@ from typing import Optional, Sequence, Union
 from pydantic import Field
 
 from camel.configs.base_config import BaseConfig
-from camel.types import NOT_GIVEN, NotGiven
 
 
 # flake8: noqa: E501
@@ -32,14 +31,14 @@ class VLLMConfig(BaseConfig):
         temperature (float, optional): Sampling temperature to use, between
             :obj:`0` and :obj:`2`. Higher values make the output more random,
             while lower values make it more focused and deterministic.
-            (default: :obj:`0.2`)
+            (default: :obj:`None`)
         top_p (float, optional): An alternative to sampling with temperature,
             called nucleus sampling, where the model considers the results of
             the tokens with top_p probability mass. So :obj:`0.1` means only
             the tokens comprising the top 10% probability mass are considered.
-            (default: :obj:`1.0`)
+            (default: :obj:`None`)
         n (int, optional): How many chat completion choices to generate for
-            each input message. (default: :obj:`1`)
+            each input message. (default: :obj:`None`)
         response_format (object, optional): An object specifying the format
             that the model must output. Compatible with GPT-4 Turbo and all
             GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
@@ -55,7 +54,7 @@ class VLLMConfig(BaseConfig):
             max context length.
         stream (bool, optional): If True, partial message deltas will be sent
             as data-only server-sent events as they become available.
-            (default: :obj:`False`)
+            (default: :obj:`None`)
         stop (str or list, optional): Up to :obj:`4` sequences where the API
             will stop generating further tokens. (default: :obj:`None`)
         max_tokens (int, optional): The maximum number of tokens to generate
@@ -66,12 +65,12 @@ class VLLMConfig(BaseConfig):
             :obj:`2.0`. Positive values penalize new tokens based on whether
             they appear in the text so far, increasing the model's likelihood
             to talk about new topics. See more information about frequency and
-            presence penalties. (default: :obj:`0.0`)
+            presence penalties. (default: :obj:`None`)
         frequency_penalty (float, optional): Number between :obj:`-2.0` and
             :obj:`2.0`. Positive values penalize new tokens based on their
             existing frequency in the text so far, decreasing the model's
             likelihood to repeat the same line verbatim. See more information
-            about frequency and presence penalties. (default: :obj:`0.0`)
+            about frequency and presence penalties. (default: :obj:`None`)
         logit_bias (dict, optional): Modify the likelihood of specified tokens
             appearing in the completion. Accepts a json object that maps tokens
             (specified by their token ID in the tokenizer) to an associated
@@ -80,10 +79,10 @@ class VLLMConfig(BaseConfig):
             The exact effect will vary per model, but values between:obj:` -1`
             and :obj:`1` should decrease or increase likelihood of selection;
             values like :obj:`-100` or :obj:`100` should result in a ban or
-            exclusive selection of the relevant token. (default: :obj:`{}`)
+            exclusive selection of the relevant token. (default: :obj:`None`)
         user (str, optional): A unique identifier representing your end-user,
             which can help OpenAI to monitor and detect abuse.
-            (default: :obj:`""`)
+            (default: :obj:`None`)
         logprobs: Whether to return log probabilities of the output tokens or
             not. If true, returns the log probabilities of each output token
             returned in the `logits` of `message`. (default: :obj:`None`)
@@ -93,17 +92,17 @@ class VLLMConfig(BaseConfig):
             this parameter is used. (default: :obj:`None`)
     """
 
-    temperature: float = 0.2  # openai default: 1.0
-    top_p: float = 1.0
-    n: int = 1
-    stream: bool = False
-    stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
-    max_tokens: Union[int, NotGiven] = NOT_GIVEN
-    presence_penalty: float = 0.0
-    response_format: Union[dict, NotGiven] = NOT_GIVEN
-    frequency_penalty: float = 0.0
+    temperature: Optional[float] = None  # openai default: 1.0
+    top_p: Optional[float] = None
+    n: Optional[int] = None
+    stream: Optional[bool] = None
+    stop: Optional[Union[str, Sequence[str]]] = None
+    max_tokens: Optional[int] = None
+    presence_penalty: Optional[float] = None
+    response_format: Optional[dict] = None
+    frequency_penalty: Optional[float] = None
     logit_bias: dict = Field(default_factory=dict)
-    user: str = ""
+    user: Optional[str] = None
     logprobs: Optional[bool] = None
     top_logprobs: Optional[int] = None
 
camel/configs/yi_config.py

@@ -16,7 +16,6 @@ from __future__ import annotations
 from typing import Optional, Union
 
 from camel.configs.base_config import BaseConfig
-from camel.types import NOT_GIVEN, NotGiven
 
 
 class YiConfig(BaseConfig):
@@ -37,22 +36,22 @@ class YiConfig(BaseConfig):
         max_tokens (int, optional): Specifies the maximum number of tokens
             the model can generate. This sets an upper limit, but does not
             guarantee that this number will always be reached.
-            (default: :obj:`5000`)
+            (default: :obj:`None`)
         top_p (float, optional): Controls the randomness of the generated
             results. Lower values lead to less randomness, while higher
-            values increase randomness. (default: :obj:`0.9`)
+            values increase randomness. (default: :obj:`None`)
         temperature (float, optional): Controls the diversity and focus of
             the generated results. Lower values make the output more focused,
             while higher values make it more diverse. (default: :obj:`0.3`)
         stream (bool, optional): If True, enables streaming output.
-            (default: :obj:`False`)
+            (default: :obj:`None`)
     """
 
     tool_choice: Optional[Union[dict[str, str], str]] = None
-    max_tokens: Union[int, NotGiven] = NOT_GIVEN
-    top_p: float = 0.9
-    temperature: float = 0.3
-    stream: bool = False
+    max_tokens: Optional[int] = None
+    top_p: Optional[float] = None
+    temperature: Optional[float] = None
+    stream: Optional[bool] = None
 
 
 YI_API_PARAMS = {param for param in YiConfig.model_fields.keys()}
camel/configs/zhipuai_config.py

@@ -16,7 +16,6 @@ from __future__ import annotations
 from typing import Optional, Sequence, Union
 
 from camel.configs.base_config import BaseConfig
-from camel.types import NOT_GIVEN, NotGiven
 
 
 class ZhipuAIConfig(BaseConfig):
@@ -29,15 +28,15 @@ class ZhipuAIConfig(BaseConfig):
         temperature (float, optional): Sampling temperature to use, between
             :obj:`0` and :obj:`2`. Higher values make the output more random,
             while lower values make it more focused and deterministic.
-            (default: :obj:`0.2`)
+            (default: :obj:`None`)
         top_p (float, optional): An alternative to sampling with temperature,
             called nucleus sampling, where the model considers the results of
             the tokens with top_p probability mass. So :obj:`0.1` means only
             the tokens comprising the top 10% probability mass are considered.
-            (default: :obj:`0.6`)
+            (default: :obj:`None`)
         stream (bool, optional): If True, partial message deltas will be sent
             as data-only server-sent events as they become available.
-            (default: :obj:`False`)
+            (default: :obj:`None`)
         stop (str or list, optional): Up to :obj:`4` sequences where the API
             will stop generating further tokens. (default: :obj:`None`)
         max_tokens (int, optional): The maximum number of tokens to generate
@@ -60,11 +59,11 @@ class ZhipuAIConfig(BaseConfig):
             are present.
     """
 
-    temperature: float = 0.2
-    top_p: float = 0.6
-    stream: bool = False
-    stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
-    max_tokens: Union[int, NotGiven] = NOT_GIVEN
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    stream: Optional[bool] = None
+    stop: Optional[Union[str, Sequence[str]]] = None
+    max_tokens: Optional[int] = None
     tool_choice: Optional[Union[dict[str, str], str]] = None
 
 
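All three config classes above follow the same migration: hard-coded defaults and NOT_GIVEN sentinels become Optional fields defaulting to None, so a parameter is only carried when the caller sets it. A minimal sketch of the effect, assuming VLLMConfig is re-exported from camel.configs (its __init__.py is among the changed files); since BaseConfig subclasses are pydantic models, unset values can be dropped with a plain exclude_none dump:

    # Hedged sketch: assumes `VLLMConfig` is importable from `camel.configs`.
    from camel.configs import VLLMConfig

    config = VLLMConfig(temperature=0.8)  # set only what you need

    # In 0.2.35 this dict carried temperature=0.2, top_p=1.0, n=1, and so
    # on; in 0.2.37 the unset fields are None and can be filtered out
    # before the request is forwarded to the backend.
    payload = config.model_dump(exclude_none=True)
    print(payload)  # e.g. {'temperature': 0.8, 'logit_bias': {}}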
camel/datasets/few_shot_generator.py

@@ -22,7 +22,6 @@ from camel.agents import ChatAgent
 from camel.logger import get_logger
 from camel.models.base_model import BaseModelBackend
 from camel.verifiers import BaseVerifier
-from camel.verifiers.models import VerifierInput
 
 from .base_generator import BaseGenerator
 from .models import DataPoint
@@ -203,10 +202,8 @@ class FewShotGenerator(BaseGenerator):
 
         try:
             verifier_response = await self.verifier.verify(
-                VerifierInput(
-                    llm_response=rationale,
-                    ground_truth=None,
-                )
+                solution=rationale,
+                ground_truth=None,
             )
             if not verifier_response or not verifier_response.result:
                 raise ValueError(
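Together with the verifier rework elsewhere in this release (camel/verifiers/base.py and python_verifier.py in the file list), verify() no longer takes a VerifierInput wrapper but accepts the solution and ground truth directly as keyword arguments. A hedged sketch of the new calling convention, using only names visible in the hunk above:

    # Sketch only: `verifier` stands in for any configured BaseVerifier.
    response = await verifier.verify(
        solution="print(6 * 7)",  # raw solution text, no wrapper object
        ground_truth=None,        # pass a reference answer when available
    )
    if response and response.result:
        print("verified:", response.result)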
camel/datasets/static_dataset.py

@@ -153,17 +153,6 @@ class StaticDataset(Dataset):
                 return None
 
         rationale = item.get('rationale')
-        if not isinstance(rationale, str):
-            if self._strict:
-                raise ValueError(
-                    f"Sample at index {idx} has invalid 'rationale': "
-                    f"expected str, got {type(rationale)}"
-                )
-            else:
-                logger.warning(
-                    f"Skipping sample at index {idx}: invalid 'rationale'"
-                )
-                return None
 
         final_answer = item.get('final_answer')
         if not isinstance(final_answer, str):
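Note the behavioral change here: a sample whose 'rationale' is missing or not a string is no longer rejected in strict mode or skipped otherwise, while the remaining fields, such as 'final_answer' just below, keep the old validation pattern.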
@@ -207,25 +196,33 @@ class StaticDataset(Dataset):
         r"""Return the size of the dataset."""
         return self._length
 
-    def __getitem__(self, idx: int) -> DataPoint:
-        r"""Retrieve a datapoint by index.
+    def __getitem__(
+        self, idx: Union[int, slice]
+    ) -> Union[DataPoint, List[DataPoint]]:
+        r"""Retrieve a datapoint or a batch of datapoints by index or slice.
 
         Args:
-            idx (int): Index of the datapoint.
+            idx (Union[int, slice]): Index or slice of the datapoint(s).
 
         Returns:
-            DataPoint: The datapoint corresponding to the given index.
+            List[DataPoint]: A list of `DataPoint` objects.
 
         Raises:
-            IndexError: If :obj:`idx` is out of bounds (negative or greater
-                than dataset length - 1).
+            IndexError: If an integer `idx` is out of bounds.
         """
+        if isinstance(idx, int):
+            if idx < 0 or idx >= self._length:
+                raise IndexError(
+                    f"Index {idx} out of bounds for dataset "
+                    f"of size {self._length}"
+                )
+            return self.data[idx]
 
-        if idx < 0 or idx >= self._length:
-            raise IndexError(
-                f"Index {idx} out of bounds for dataset of size {self._length}"
-            )
-        return self.data[idx]
+        elif isinstance(idx, slice):
+            return self.data[idx.start : idx.stop : idx.step]
+
+        else:
+            raise TypeError(f"Indexing type {type(idx)} not supported.")
 
     def sample(self) -> DataPoint:
         r"""Sample a random datapoint from the dataset.
@@ -240,7 +237,12 @@
         if self._length == 0:
             raise RuntimeError("Dataset is empty, cannot sample.")
         idx = self._rng.randint(0, self._length - 1)
-        return self[idx]
+        sample = self[idx]
+        if not isinstance(sample, DataPoint):
+            raise TypeError(
+                f"Expected DataPoint instance, got {type(sample).__name__}"
+            )
+        return sample
 
     @property
     def metadata(self) -> Dict[str, Any]:
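Because __getitem__ now returns Union[DataPoint, List[DataPoint]], the added isinstance guard narrows the sampled value back to a single DataPoint; with an integer index the list branch is unreachable in practice, so the check mainly serves the declared DataPoint return type.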
camel/environments/models.py

@@ -33,6 +33,9 @@ class Action(BaseModel):
             generated (UTC).
     """
 
+    index: int = Field(
+        ..., description="Index of the state this action is performed upon"
+    )
     llm_response: str = Field(description="Generated response from the LLM")
     metadata: Dict[str, Any] = Field(
         default_factory=dict,
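With Field(...) as its default, index is a required field, so existing code constructing Action objects must now supply it. A minimal hedged sketch (module path per the file list above; metadata and the timestamp are assumed to keep their documented defaults):

    from camel.environments.models import Action

    action = Action(
        index=0,  # the state this action is performed upon
        llm_response="The answer is 42.",
    )
    # Omitting `index` would now raise a pydantic ValidationError.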