camel-ai 0.2.20a1__py3-none-any.whl → 0.2.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai might be problematic.

camel/__init__.py CHANGED
@@ -14,7 +14,7 @@
 
 from camel.logger import disable_logging, enable_logging, set_log_level
 
-__version__ = '0.2.20a1'
+__version__ = '0.2.21'
 
 __all__ = [
     '__version__',
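
The only functional change here is the version bump from the 0.2.20a1 pre-release to the stable 0.2.21. A quick sanity check after upgrading (assuming the package is installed as `camel-ai`):

# Verify the installed release, e.g. after `pip install camel-ai==0.2.21`.
import camel

assert camel.__version__ == "0.2.21"
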
@@ -573,9 +573,8 @@ class ChatAgent(BaseAgent):
             self.model_backend.model_config_dict.get("response_format")
             and response_format
         ):
-            raise ValueError(
-                "The `response_format` parameter cannot be set both in "
-                "the model configuration and in the ChatAgent step."
+            logger.warning(
+                f"Overriding the response format with {response_format}."
             )
 
         self.original_model_dict = self.model_backend.model_config_dict
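
This hunk changes behavior, not just wording: previously, setting `response_format` in both the model configuration and the `ChatAgent.step()` call raised a `ValueError`; from 0.2.21 the step-level value wins and only a warning is logged. A minimal sketch of the new behavior; the agent setup and the `Answer` schema are illustrative, not taken from the diff:

from pydantic import BaseModel

from camel.agents import ChatAgent


class Answer(BaseModel):
    reply: str


# Hypothetical agent whose model backend config already sets a
# response_format of its own.
agent = ChatAgent("You are a helpful assistant.")

# Pre-0.2.21: raised ValueError ("cannot be set both ...").
# 0.2.21: logs "Overriding the response format with <class 'Answer'>."
# and the step-level format takes precedence.
response = agent.step("Reply with a greeting.", response_format=Answer)
print(response.msg.content)
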
@@ -165,11 +165,7 @@ class KnowledgeGraphAgent(ChatAgent):
             task=str(element)
         )
 
-        knowledge_graph_generation_msg = BaseMessage.make_user_message(
-            role_name="Graphify", content=knowledge_graph_generation
-        )
-
-        response = self.step(input_message=knowledge_graph_generation_msg)
+        response = self.step(input_message=knowledge_graph_generation)
 
         content = response.msg.content
 
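This is the pattern that recurs through the rest of the release (see the apibench, nexus, and Telegram hunks below): `ChatAgent.step()` accepts a plain string, so call sites no longer wrap prompts in `BaseMessage.make_user_message` first. A before/after sketch, with the agent construction assumed:

from camel.agents import ChatAgent
from camel.messages import BaseMessage

agent = ChatAgent("You are a helpful assistant.")  # illustrative setup

# Old style: prompts were wrapped in a BaseMessage by hand.
msg = BaseMessage.make_user_message(role_name="User", content="Hello!")
response = agent.step(msg)

# New style: a plain string works directly (BaseMessage is still accepted).
response = agent.step("Hello!")
print(response.msg.content)
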
@@ -24,7 +24,6 @@ from tree_sitter import Language, Parser
 
 from camel.agents import ChatAgent
 from camel.benchmarks.base import BaseBenchmark
-from camel.messages import BaseMessage
 from camel.utils import download_github_subdirectory
 
 logger = logging.getLogger(__name__)
@@ -281,12 +280,9 @@ class APIBenchBenchmark(BaseBenchmark):
         with open(self.save_to, "w") as f:
             for question in tqdm(datas, desc="Running"):
                 prompt = encode_question(question["text"], dataset_name)
-                msg = BaseMessage.make_user_message(
-                    role_name="User", content=prompt
-                )
                 try:
                     # Generate response
-                    responses = agent.step(msg)
+                    responses = agent.step(prompt)
                     response = responses.msgs[0].content
                     api_database = self._data['api']
                     qa_pairs = self._data['eval']
camel/benchmarks/nexus.py CHANGED
@@ -28,7 +28,6 @@ from tqdm import tqdm
 
 from camel.agents import ChatAgent
 from camel.benchmarks.base import BaseBenchmark
-from camel.messages import BaseMessage
 
 logger = logging.getLogger(__name__)
 
@@ -309,13 +308,10 @@ class NexusBenchmark(BaseBenchmark):
         with open(self.save_to, "w") as f:
             for sample in tqdm(datas, desc="Running"):
                 prompt = construct_prompt(input=sample.input, tools=tools)
-                msg = BaseMessage.make_user_message(
-                    role_name="User", content=prompt
-                )
                 ground_truth_call = sample.output
                 try:
                     # Generate response
-                    response = agent.step(msg)
+                    response = agent.step(prompt)
                     agent_call = response.msgs[0].content
 
                     # Evaluate response
@@ -188,8 +188,8 @@ def ragas_evaluate_dataset(
     Returns:
         Dataset: Dataset with added evaluation metrics.
     """
-    from ragas import evaluate
-    from ragas.metrics import (  # type: ignore[import-untyped]
+    from ragas import evaluate  # type: ignore[import]
+    from ragas.metrics import (  # type: ignore[import]
         context_relevancy,
         faithfulness,
     )
@@ -15,7 +15,6 @@ import os
 from typing import TYPE_CHECKING, Optional
 
 from camel.agents import ChatAgent
-from camel.messages import BaseMessage
 from camel.utils import dependencies_required
 
 # Conditionally import telebot types only for type checking
@@ -74,9 +73,6 @@ class TelegramBot:
         if not message.text:
             return
 
-        user_msg = BaseMessage.make_user_message(
-            role_name="User", content=message.text
-        )
-        assistant_response = self.chat_agent.step(user_msg)
+        assistant_response = self.chat_agent.step(message.text)
 
         self.bot.reply_to(message, assistant_response.msg.content)
camel/configs/__init__.py CHANGED
@@ -11,6 +11,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from .aiml_config import AIML_API_PARAMS, AIMLConfig
 from .anthropic_config import ANTHROPIC_API_PARAMS, AnthropicConfig
 from .base_config import BaseConfig
 from .cohere_config import COHERE_API_PARAMS, CohereConfig
@@ -85,4 +86,6 @@ __all__ = [
     "MOONSHOT_API_PARAMS",
     'SiliconFlowConfig',
     'SILICONFLOW_API_PARAMS',
+    'AIMLConfig',
+    'AIML_API_PARAMS',
 ]
camel/configs/aiml_config.py ADDED
@@ -0,0 +1,80 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from __future__ import annotations
+
+from typing import Sequence, Type, Union
+
+from pydantic import BaseModel, Field
+
+from camel.configs.base_config import BaseConfig
+from camel.types import NOT_GIVEN, NotGiven
+
+
+class AIMLConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using the
+    AIML API.
+
+    Args:
+        temperature (float, optional): Determines the degree of randomness
+            in the response. (default: :obj:`0.7`)
+        top_p (float, optional): The top_p (nucleus) parameter is used to
+            dynamically adjust the number of choices for each predicted token
+            based on the cumulative probabilities. (default: :obj:`0.7`)
+        n (int, optional): Number of generations to return.
+            (default: :obj:`1`)
+        response_format (object, optional): An object specifying the format
+            that the model must output.
+        stream (bool, optional): If set, tokens are returned as Server-Sent
+            Events as they are made available. (default: :obj:`False`)
+        stop (str or list, optional): Up to :obj:`4` sequences where the API
+            will stop generating further tokens. (default: :obj:`None`)
+        max_tokens (int, optional): The maximum number of tokens to generate.
+            (default: :obj:`None`)
+        logit_bias (dict, optional): Modify the likelihood of specified tokens
+            appearing in the completion. Accepts a json object that maps tokens
+            (specified by their token ID in the tokenizer) to an associated
+            bias value from :obj:`-100` to :obj:`100`. Mathematically, the bias
+            is added to the logits generated by the model prior to sampling.
+            The exact effect will vary per model, but values between :obj:`-1`
+            and :obj:`1` should decrease or increase likelihood of selection;
+            values like :obj:`-100` or :obj:`100` should result in a ban or
+            exclusive selection of the relevant token. (default: :obj:`{}`)
+        frequency_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on their
+            existing frequency in the text so far, decreasing the model's
+            likelihood to repeat the same line verbatim. See more information
+            about frequency and presence penalties. (default: :obj:`0.0`)
+        presence_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on whether
+            they appear in the text so far, increasing the model's likelihood
+            to talk about new topics. See more information about frequency and
+            presence penalties. (default: :obj:`0.0`)
+        tools (list[FunctionTool], optional): A list of tools the model may
+            call. Currently, only functions are supported as a tool. Use this
+            to provide a list of functions the model may generate JSON inputs
+            for. A max of 128 functions are supported.
+    """
+
+    temperature: float = 0.7
+    top_p: float = 0.7
+    n: int = 1
+    stream: bool = False
+    stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
+    max_tokens: Union[int, NotGiven] = NOT_GIVEN
+    logit_bias: dict = Field(default_factory=dict)
+    response_format: Union[Type[BaseModel], dict, NotGiven] = NOT_GIVEN
+    presence_penalty: float = 0.0
+    frequency_penalty: float = 0.0
+
+
+AIML_API_PARAMS = {param for param in AIMLConfig.model_fields.keys()}
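
A hedged sketch of how the new config slots into the rest of the library. The `as_dict()` helper comes from `BaseConfig`; the `ModelFactory` / `ModelPlatformType.AIML` wiring and the model name are assumptions based on how CAMEL's other provider configs are consumed, not something this diff shows:

from camel.configs import AIML_API_PARAMS, AIMLConfig
from camel.models import ModelFactory
from camel.types import ModelPlatformType  # AIML member assumed to exist

# Fields left as NOT_GIVEN are omitted, so only explicitly set
# parameters reach the provider API.
config = AIMLConfig(temperature=0.2, max_tokens=512)
assert "temperature" in AIML_API_PARAMS

# Assumed wiring, mirroring other CAMEL provider configs.
model = ModelFactory.create(
    model_platform=ModelPlatformType.AIML,
    model_type="gpt-4o-mini",  # illustrative model name
    model_config_dict=config.as_dict(),
)
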
camel/datagen/__init__.py CHANGED
@@ -12,10 +12,12 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
-from .cotdatagen import CoTDataGenerator
+from .cot_datagen import CoTDataGenerator
+from .self_improving_cot import SelfImprovingCoTPipeline
 from .self_instruct import SelfInstructPipeline
 
 __all__ = [
     "CoTDataGenerator",
     "SelfInstructPipeline",
+    "SelfImprovingCoTPipeline",
 ]
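
The `cotdatagen` to `cot_datagen` rename is invisible to anyone importing from the package root, and the new `SelfImprovingCoTPipeline` is exported alongside the existing generators. An import-level check only, since the new pipeline's constructor arguments are not part of this diff:

# All three generators resolve from the package root; the module rename
# does not affect this import path.
from camel.datagen import (
    CoTDataGenerator,
    SelfImprovingCoTPipeline,
    SelfInstructPipeline,
)

print(SelfImprovingCoTPipeline.__module__)  # camel.datagen.self_improving_cot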