camel-ai 0.2.17__py3-none-any.whl → 0.2.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this release of camel-ai has been flagged as potentially problematic.

camel/__init__.py CHANGED
@@ -14,7 +14,7 @@
 
 from camel.logger import disable_logging, enable_logging, set_log_level
 
-__version__ = '0.2.17'
+__version__ = '0.2.19'
 
 __all__ = [
     '__version__',
@@ -579,13 +579,18 @@ class ChatAgent(BaseAgent):
         )
 
         self.original_model_dict = self.model_backend.model_config_dict
-        if response_format and self.model_type in {"gpt-4o", "gpt-4o-mini"}:
+        model_response_format_modified = False
+        if (
+            response_format
+            and self.model_type.support_native_structured_output
+        ):
             self.model_backend.model_config_dict = (
                 self.original_model_dict.copy()
             )
             self.model_backend.model_config_dict["response_format"] = (
                 response_format
             )
+            model_response_format_modified = True
 
         # Convert input message to BaseMessage if necessary
         if isinstance(input_message, str):
@@ -604,7 +609,12 @@
         # Add user input to memory
         self.update_memory(input_message, OpenAIBackendRole.USER)
 
-        return self._handle_step(response_format, self.single_iteration)
+        try:
+            return self._handle_step(response_format, self.single_iteration)
+        finally:
+            if model_response_format_modified:
+                # Reset model config back to original state
+                self.model_backend.model_config_dict = self.original_model_dict
 
     def _inject_tool_prompt(self) -> None:
         r"""Generate and add the tool prompt to memory."""
@@ -22,17 +22,36 @@ from camel.agents.programmed_agent_instruction import (
     ProgrammedAgentInstructionResult,
     programmable_capability,
 )
-from camel.messages import BaseMessage
-from camel.synthetic_datagen.source2synth.models import (
+from camel.datagen.source2synth.models import (
     ContextPrompt,
     MultiHopQA,
 )
+from camel.messages import BaseMessage
 
 
 class MultiHopGeneratorAgent(ProgrammableChatAgent):
+    r"""An agent specialized in generating multi-hop question-answer pairs.
+
+    This agent is designed to create complex questions that require multiple
+    steps of reasoning to answer. It analyzes context to identify related
+    facts and generates questions that require connecting these facts
+    logically.
+
+    Attributes:
+        model_config (ConfigDict): Configuration for model behavior.
+        system_message (BaseMessage): System message defining agent's role and
+            instructions.
+    """
+
     model_config = ConfigDict(arbitrary_types_allowed=True)
 
-    def __init__(self, **kwargs: Any):
+    def __init__(self, **kwargs: Any) -> None:
+        r"""Initialize the MultiHopGeneratorAgent.
+
+        Args:
+            **kwargs (Any): Additional keyword arguments to pass to parent
+                class.
+        """
         super().__init__(**kwargs)
 
         system_text: str = textwrap.dedent(
@@ -64,6 +83,19 @@ class MultiHopGeneratorAgent(ProgrammableChatAgent):
     def generate_multi_hop_qa(
         self, context: str
     ) -> ProgrammedAgentInstructionResult[MultiHopQA]:
+        r"""Generate a multi-hop question-answer pair from given context.
+
+        Args:
+            context (str): The input text context to generate QA from.
+
+        Returns:
+            ProgrammedAgentInstructionResult[MultiHopQA]: Result containing the
+                generated question, reasoning steps, answer, and supporting
+                facts.
+
+        Raises:
+            RuntimeError: If the agent fails to generate a response.
+        """
         context_prompt = ContextPrompt(
             main_context=context, related_contexts=None
         )
@@ -26,6 +26,16 @@ T = TypeVar('T')
 
 
 class ProgrammableAgentRequirement(Enum):
+    r"""Requirements for programmable agent state.
+
+    Defines the possible requirements that can be used to repair the state
+    of a programmable agent.
+
+    Attributes:
+        LAST_MESSAGE_NOT_USER (str): Requires that the last message in the
+            conversation was not from the user.
+    """
+
     LAST_MESSAGE_NOT_USER = "LAST_MESSAGE_NOT_USER"
 
 
@@ -34,6 +44,11 @@ class ProgrammedAgentInstructionResult(BaseModel, Generic[T]):
 
     Contains the messages exchanged during execution and the computed value.
     The value type is specified by the generic type parameter T.
+
+    Attributes:
+        user_message (BaseMessage): The message sent by the user.
+        agent_message (BaseMessage): The message sent by the agent.
+        value (T): The computed result value of type T.
     """
 
     user_message: BaseMessage
@@ -48,8 +63,7 @@ class AbstractProgrammableAgent(abc.ABC):
 
     A programmable agent is an agent that can be programmed to perform a
     specific function or task. This class defines the interface for a
-    programmable
-    agent.
+    programmable agent.
 
     These methods should be implemented in order to ensure the agent supports
     the necessary guarantees to enable a programming interface while
@@ -68,16 +82,15 @@ class AbstractProgrammableAgent(abc.ABC):
         An atomic operation is an operation that is guaranteed to
         be executed without interruption by any other operation.
 
-        If the operation fails or times out the agents state should be
-        unchanged.
+        Args:
+            callback (Callable[[], ProgrammedAgentInstructionResult[T]]): The
+                operation to execute atomically.
 
-        If an operation is already in progress, this method should throw an
-        exception. (It is up to the caller to do any queuing)
+        Returns:
+            ProgrammedAgentInstructionResult[T]: The result of the operation.
 
-        If the agent is in a state where it can perform the operation,
-        it must leave the agent in a state where it can perform the
-        operation again. Though if state changes in successful operation
-        improve its ability to perform the operation, it should keep them.
+        Raises:
+            RuntimeError: If an operation is already in progress.
         """
         raise NotImplementedError
 
@@ -86,10 +99,13 @@
         r"""Repair the state of the agent.
 
         Agents may have other non-atomic interfaces, such as a user interface,
-        or chat between other agents.
+        or chat between other agents. This method should restore the agent to
+        a state where it can perform operations according to the specified
+        requirement.
 
-        This method should restore the agent to a state where it can perform
-        operations according to the specified requirement.
+        Args:
+            requirement (ProgrammableAgentRequirement): The requirement to
+                repair the state for.
         """
         raise NotImplementedError
 
@@ -99,10 +115,16 @@ def programmable_capability(
 ) -> Callable[..., ProgrammedAgentInstructionResult[T]]:
     r"""Decorator for programmable agent capabilities.
 
-    Wraps a method to ensure it is executed atomically via the agent's
-    run_atomic interface.
-    The decorated method must return a ProgrammedAgentInstructionResult with
-    appropriate type parameter.
+    This decorator ensures that the decorated method is executed atomically
+    and maintains the agent's state guarantees.
+
+    Args:
+        func (Callable[..., ProgrammedAgentInstructionResult[T]]): The method
+            to decorate.
+
+    Returns:
+        Callable[..., ProgrammedAgentInstructionResult[T]]: The decorated
+            method that ensures atomic execution.
     """
 
     @wraps(func)
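The previous docstring described the decorator as wrapping a method so it runs atomically via the agent's run_atomic interface. A hedged sketch of how such a wrapper can be built around @wraps; the capability name and the wrapper body are illustrative, since only @wraps(func) is visible in the diff:

from functools import wraps
from typing import Any, Callable, TypeVar

T = TypeVar("T")


def capability(func: Callable[..., T]) -> Callable[..., T]:
    # Illustrative wrapper: route the decorated call through run_atomic.
    @wraps(func)
    def wrapper(self: Any, *args: Any, **kwargs: Any) -> T:
        return self.run_atomic(lambda: func(self, *args, **kwargs))

    return wrapper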
@@ -120,9 +142,20 @@ class ProgrammableChatAgent(ChatAgent, AbstractProgrammableAgent):
     Provides a default implementation of atomic execution using threading locks
     and basic state tracking for message roles. Implementing classes need to
     provide specific repair logic for their use cases.
+
+    Attributes:
+        _operation_lock (threading.Lock): Lock for ensuring atomic operations.
+        _last_message_role (Optional[str]): Role of the last message in the
+            conversation.
     """
 
-    def __init__(self, **kwargs: Any):
+    def __init__(self, **kwargs: Any) -> None:
+        r"""Initialize the ProgrammableChatAgent.
+
+        Args:
+            **kwargs (Any): Additional keyword arguments to pass to parent
+                class.
+        """
         super().__init__(**kwargs)
         self._operation_lock = threading.Lock()
         self._last_message_role: Optional[str] = None
@@ -130,6 +163,20 @@ class ProgrammableChatAgent(ChatAgent, AbstractProgrammableAgent):
     def run_atomic(
         self, callback: Callable[[], ProgrammedAgentInstructionResult[T]]
     ) -> ProgrammedAgentInstructionResult[T]:
+        r"""Run an atomic operation on the agent.
+
+        Ensures thread-safe execution of the callback function by using a lock.
+
+        Args:
+            callback (Callable[[], ProgrammedAgentInstructionResult[T]]): The
+                operation to execute atomically.
+
+        Returns:
+            ProgrammedAgentInstructionResult[T]: The result of the operation.
+
+        Raises:
+            RuntimeError: If an operation is already in progress.
+        """
         if not self._operation_lock.acquire(blocking=False):
            raise RuntimeError("Operation already in progress")
 
@@ -141,6 +188,14 @@ class ProgrammableChatAgent(ChatAgent, AbstractProgrammableAgent):
             self._operation_lock.release()
 
     def repair_state(self, requirement: ProgrammableAgentRequirement) -> None:
+        r"""Repair the state of the agent.
+
+        Implements basic state repair for message role requirements.
+
+        Args:
+            requirement (ProgrammableAgentRequirement): The requirement to
+                repair the state for.
+        """
         if requirement == ProgrammableAgentRequirement.LAST_MESSAGE_NOT_USER:
             if self._last_message_role == "user":
                 raise NotImplementedError(
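The run_atomic implementation documented above acquires its lock non-blockingly and raises instead of queueing, then releases in a finally block. A self-contained sketch of that locking pattern; the AtomicRunner class is illustrative, not a camel API:

import threading
from typing import Callable, TypeVar

T = TypeVar("T")


class AtomicRunner:
    def __init__(self) -> None:
        self._operation_lock = threading.Lock()

    def run_atomic(self, callback: Callable[[], T]) -> T:
        # Fail fast instead of waiting if another operation holds the lock.
        if not self._operation_lock.acquire(blocking=False):
            raise RuntimeError("Operation already in progress")
        try:
            return callback()
        finally:
            self._operation_lock.release()


runner = AtomicRunner()
print(runner.run_atomic(lambda: "done"))  # -> done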
@@ -30,7 +30,7 @@ class DeepSeekConfig(BaseConfig):
         temperature (float, optional): Sampling temperature to use, between
             :obj:`0` and :obj:`2`. Higher values make the output more random,
             while lower values make it more focused and deterministic.
-            (default: :obj:`0.2`)
+            (default: :obj:`1.0`)
         top_p (float, optional): Controls the diversity and focus of the
             generated results. Higher values make the output more diverse,
             while lower values make it more focused. (default: :obj:`1.0`)
@@ -86,7 +86,7 @@ class DeepSeekConfig(BaseConfig):
             :obj:`True`)
     """
 
-    temperature: float = 0.2  # deepseek default: 1.0
+    temperature: float = 1.0  # deepseek default: 1.0
     top_p: float = 1.0
     stream: bool = False
     stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
@@ -83,7 +83,7 @@ class GeminiConfig(BaseConfig):
     stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
     max_tokens: Union[int, NotGiven] = NOT_GIVEN
     response_format: Union[Type[BaseModel], dict, NotGiven] = NOT_GIVEN
-    tool_choice: Optional[Union[dict[str, str], str]] = None
+    tool_choice: Optional[Union[dict[str, str], str, NotGiven]] = NOT_GIVEN
 
     def as_dict(self) -> dict[str, Any]:
         r"""Convert the current configuration to a dictionary.
@@ -27,7 +27,7 @@ class InternLMConfig(BaseConfig):
             (default: :obj:`False`)
         temperature (float, optional): Controls the diversity and focus of
             the generated results. Lower values make the output more focused,
-            while higher values make it more diverse. (default: :obj:`0.3`)
+            while higher values make it more diverse. (default: :obj:`0.8`)
         top_p (float, optional): Controls the diversity and focus of the
             generated results. Higher values make the output more diverse,
             while lower values make it more focused. (default: :obj:`0.9`)
@@ -56,6 +56,10 @@ class SGLangConfig(BaseConfig):
             in the chat completion. The total length of input tokens and
             generated tokens is limited by the model's context length.
             (default: :obj:`None`)
+        tools (list[FunctionTool], optional): A list of tools the model may
+            call. Currently, only functions are supported as a tool. Use this
+            to provide a list of functions the model may generate JSON inputs
+            for. A max of 128 functions are supported.
     """
 
     stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
@@ -171,6 +171,7 @@ class SelfInstructPipeline:
         )
 
         response = self.agent.step(prompt)
+        self.agent.reset()
         generated_tasks = [
             line.strip()
             for line in response.msgs[0].content.split("\n")
@@ -197,6 +198,7 @@ class SelfInstructPipeline:
             "{\n \"answer\": false\n}\n"
         )
         response = self.agent.step(clf_prompt)
+        self.agent.reset()
         try:
             structured_response = AgentResponse.parse_raw(
                 response.msgs[0].content.strip()
@@ -241,6 +243,7 @@ class SelfInstructPipeline:
         )
 
         response = self.agent.step(prompt)
+        self.agent.reset()
         generated_text = response.msgs[0].content.strip()
 
         if classification:
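Each of the three pipeline changes above calls agent.reset() right after agent.step(...). Assuming reset() clears the agent's accumulated conversation memory (the update_memory call earlier in this diff suggests memory grows with every step), this keeps each generated prompt independent of the ones before it. A toy illustration of the idea, not camel code:

from typing import List


class ToyAgent:
    def __init__(self) -> None:
        self.history: List[str] = []

    def step(self, prompt: str) -> List[str]:
        # The "model" sees everything accumulated so far.
        self.history.append(prompt)
        return list(self.history)

    def reset(self) -> None:
        self.history.clear()


agent = ToyAgent()
for prompt in ["task 1", "task 2", "task 3"]:
    context = agent.step(prompt)
    assert len(context) == 1  # only the current prompt, no carry-over
    agent.reset()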
@@ -0,0 +1,31 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from .data_processor import (
+    DataCurator,
+    ExampleConstructor,
+    UserDataProcessor,
+)
+from .models import MultiHopQA, ReasoningStep
+from .user_data_processor_config import (
+    ProcessorConfig,
+)
+
+__all__ = [
+    "DataCurator",
+    "ExampleConstructor",
+    "ProcessorConfig",
+    "UserDataProcessor",
+    "ReasoningStep",
+    "MultiHopQA",
+]
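This new __init__.py makes the listed names importable from the package root in 0.2.19. A small usage check mirroring the __all__ list above; no constructor arguments are assumed, since they are not shown in this diff:

from camel.datagen.source2synth import (
    DataCurator,
    ExampleConstructor,
    MultiHopQA,
    ProcessorConfig,
    ReasoningStep,
    UserDataProcessor,
)

# Verify the re-exports resolve after upgrading to camel-ai 0.2.19.
print(DataCurator, ExampleConstructor, MultiHopQA)
print(ProcessorConfig, ReasoningStep, UserDataProcessor)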