camel-ai 0.2.20a0__py3-none-any.whl → 0.2.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of camel-ai has been flagged as potentially problematic.

camel/interpreters/subprocess_interpreter.py CHANGED

@@ -98,15 +98,70 @@ class SubprocessInterpreter(BaseInterpreter):
         if not file.is_file():
             raise RuntimeError(f"{file} is not a file.")
         code_type = self._check_code_type(code_type)
-        cmd = shlex.split(
-            self._CODE_EXECUTE_CMD_MAPPING[code_type].format(
-                file_name=str(file)
+        if code_type == "python":
+            # For Python code, use ast to analyze and modify the code
+            import ast
+
+            import astor
+
+            with open(file, 'r') as f:
+                source = f.read()
+
+            # Parse the source code
+            try:
+                tree = ast.parse(source)
+                # Get the last node
+                if tree.body:
+                    last_node = tree.body[-1]
+                    # If it's an expression, wrap it in a print
+                    if isinstance(last_node, ast.Expr):
+                        tree.body[-1] = ast.Expr(
+                            value=ast.Call(
+                                func=ast.Name(id='print', ctx=ast.Load()),
+                                args=[
+                                    ast.Call(
+                                        func=ast.Name(
+                                            id='repr', ctx=ast.Load()
+                                        ),
+                                        args=[last_node.value],
+                                        keywords=[],
+                                    )
+                                ],
+                                keywords=[],
+                            )
+                        )
+                # Fix missing source locations
+                ast.fix_missing_locations(tree)
+                # Convert back to source
+                modified_source = astor.to_source(tree)
+                # Create a temporary file with the modified source
+                temp_file = self._create_temp_file(modified_source, "py")
+                cmd = shlex.split(f"python {temp_file!s}")
+            except SyntaxError:
+                # If parsing fails, run the original file
+                cmd = shlex.split(
+                    self._CODE_EXECUTE_CMD_MAPPING[code_type].format(
+                        file_name=str(file)
+                    )
+                )
+        else:
+            # For non-Python code, use standard execution
+            cmd = shlex.split(
+                self._CODE_EXECUTE_CMD_MAPPING[code_type].format(
+                    file_name=str(file)
+                )
             )
-        )
+
         proc = subprocess.Popen(
             cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
         )
         stdout, stderr = proc.communicate()
+        return_code = proc.returncode
+
+        # Clean up temporary file if it was created
+        if code_type == "python" and 'temp_file' in locals():
+            temp_file.unlink()
+
         if self.print_stdout and stdout:
             print("======stdout======")
             print(Fore.GREEN + stdout + Fore.RESET)
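
The hunk above makes file execution behave like a REPL: if the last top-level statement is a bare expression, it is rewritten into `print(repr(...))` before the file runs. A minimal standalone sketch of the same transformation, using the stdlib `ast.unparse` (Python 3.9+) in place of the `astor` dependency the diff relies on:

```python
import ast


def echo_last_expression(source: str) -> str:
    """Rewrite source so a trailing bare expression is echoed via repr."""
    tree = ast.parse(source)
    if tree.body and isinstance(tree.body[-1], ast.Expr):
        last = tree.body[-1]
        # Replace the trailing expression with print(repr(<expression>)).
        tree.body[-1] = ast.Expr(
            value=ast.Call(
                func=ast.Name(id="print", ctx=ast.Load()),
                args=[
                    ast.Call(
                        func=ast.Name(id="repr", ctx=ast.Load()),
                        args=[last.value],
                        keywords=[],
                    )
                ],
                keywords=[],
            )
        )
        # Newly created nodes need line/column info before unparsing.
        ast.fix_missing_locations(tree)
    return ast.unparse(tree)


print(echo_last_expression("x = 21\nx * 2"))
# x = 21
# print(repr(x * 2))
```
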
@@ -115,8 +170,19 @@ class SubprocessInterpreter(BaseInterpreter):
             print("======stderr======")
             print(Fore.RED + stderr + Fore.RESET)
             print("==================")
-        exec_result = f"{stdout}"
-        exec_result += f"(stderr: {stderr})" if stderr else ""
+
+        # Build the execution result
+        exec_result = ""
+        if stdout:
+            exec_result += stdout
+        if stderr:
+            exec_result += f"(stderr: {stderr})"
+        if return_code != 0:
+            error_msg = f"(Execution failed with return code {return_code})"
+            if not stderr:
+                exec_result += error_msg
+            elif error_msg not in stderr:
+                exec_result += error_msg
         return exec_result

     def run(
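
Taken together, the two hunks make `run` echo the value of a trailing expression and surface non-zero exit codes even when stderr is empty. A hedged usage sketch, assuming `SubprocessInterpreter` is importable from `camel.interpreters` and that `run(code, code_type)` is the public entry point shown above:

```python
from camel.interpreters import SubprocessInterpreter

interpreter = SubprocessInterpreter()

# The trailing bare expression is wrapped in print(repr(...)),
# so its value appears in the captured stdout.
print(interpreter.run("x = 2\nx + 2", "python"))  # -> "4\n"

# A script that exits non-zero without writing to stderr now
# reports its return code in the result string.
print(interpreter.run("import sys\nsys.exit(3)", "python"))
# -> "(Execution failed with return code 3)"
```
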
camel/models/__init__.py CHANGED
@@ -11,6 +11,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from .aiml_model import AIMLModel
 from .anthropic_model import AnthropicModel
 from .azure_openai_model import AzureOpenAIModel
 from .base_model import BaseModelBackend
@@ -72,4 +73,5 @@ __all__ = [
     'FishAudioModel',
     'InternLMModel',
     'MoonshotModel',
+    'AIMLModel',
 ]

camel/models/aiml_model.py ADDED

@@ -0,0 +1,147 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import os
+from typing import Any, Dict, List, Optional, Union
+
+from openai import OpenAI, Stream
+
+from camel.configs import AIML_API_PARAMS, AIMLConfig
+from camel.messages import OpenAIMessage
+from camel.models.base_model import BaseModelBackend
+from camel.types import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    ModelType,
+)
+from camel.utils import (
+    BaseTokenCounter,
+    OpenAITokenCounter,
+    api_keys_required,
+)
+
+
+class AIMLModel(BaseModelBackend):
+    r"""AIML API in a unified BaseModelBackend interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into OpenAI client. If :obj:`None`,
+            :obj:`AIMLConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            the AIML service. (default: :obj:`None`)
+        url (Optional[str], optional): The URL to the AIML service. If
+            not provided, :obj:`https://api.aimlapi.com/v1` will be used.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+            (default: :obj:`None`)
+    """
+
+    @api_keys_required(
+        [
+            ("api_key", 'AIML_API_KEY'),
+        ]
+    )
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+    ) -> None:
+        if model_config_dict is None:
+            model_config_dict = AIMLConfig().as_dict()
+        api_key = api_key or os.environ.get("AIML_API_KEY")
+        url = url or os.environ.get(
+            "AIML_API_BASE_URL",
+            "https://api.aimlapi.com/v1",
+        )
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter
+        )
+        self._client = OpenAI(
+            timeout=180,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )
+
+    def run(
+        self,
+        messages: List[OpenAIMessage],
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        r"""Runs inference of OpenAI chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `Stream[ChatCompletionChunk]` in the stream mode.
+        """
+        # Process model configuration parameters
+        model_config = self.model_config_dict.copy()
+
+        # Handle special case for tools parameter
+        if model_config.get('tools') is None:
+            model_config['tools'] = []
+
+        response = self._client.chat.completions.create(
+            messages=messages, model=self.model_type, **model_config
+        )
+        return response
+
+    @property
+    def token_counter(self) -> BaseTokenCounter:
+        r"""Initialize the token counter for the model backend.
+
+        Returns:
+            BaseTokenCounter: The token counter following the model's
+                tokenization style.
+        """
+        if not self._token_counter:
+            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
+        return self._token_counter
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any
+        unexpected arguments to AIML API.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to AIML API.
+        """
+        for param in self.model_config_dict:
+            if param not in AIML_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into AIML model backend."
+                )
+
+    @property
+    def stream(self) -> bool:
+        """Returns whether the model is in stream mode, which sends partial
+        results each time.
+
+        Returns:
+            bool: Whether the model is in stream mode.
+        """
+        return self.model_config_dict.get('stream', False)
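
Since the backend wraps the OpenAI client, driving it directly looks like any other OpenAI-compatible backend. A sketch with a placeholder key and a hypothetical model name (AIML's model catalog is not shown in this diff):

```python
import os

from camel.models.aiml_model import AIMLModel

os.environ["AIML_API_KEY"] = "<your-key>"  # placeholder, not a real key

model = AIMLModel(model_type="gpt-4o-mini")  # hypothetical model name
completion = model.run(
    [{"role": "user", "content": "Reply with one word: hello."}]
)
print(completion.choices[0].message.content)
```
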

camel/models/model_factory.py CHANGED

@@ -13,6 +13,7 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from typing import Dict, Optional, Type, Union

+from camel.models.aiml_model import AIMLModel
 from camel.models.anthropic_model import AnthropicModel
 from camel.models.azure_openai_model import AzureOpenAIModel
 from camel.models.base_model import BaseModelBackend
@@ -104,6 +105,8 @@ class ModelFactory:
             model_class = NvidiaModel
         elif model_platform.is_siliconflow:
             model_class = SiliconFlowModel
+        elif model_platform.is_aiml:
+            model_class = AIMLModel

         elif model_platform.is_openai and model_type.is_openai:
             model_class = OpenAIModel
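
With the new branch in place, the backend is presumably reachable through the platform enum as well; `ModelPlatformType.AIML` is inferred from the `is_aiml` check above rather than shown in this diff:

```python
from camel.models import ModelFactory
from camel.types import ModelPlatformType

model = ModelFactory.create(
    model_platform=ModelPlatformType.AIML,  # inferred from is_aiml above
    model_type="gpt-4o-mini",  # hypothetical model name
)
```
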

camel/models/siliconflow_model.py CHANGED

@@ -18,7 +18,7 @@ from openai import OpenAI, Stream

 from camel.configs import SILICONFLOW_API_PARAMS, SiliconFlowConfig
 from camel.messages import OpenAIMessage
-from camel.models import BaseModelBackend
+from camel.models.base_model import BaseModelBackend
 from camel.types import (
     ChatCompletion,
     ChatCompletionChunk,
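
This one-line fix matters because `camel/models/__init__.py` imports every backend module at package-import time; a backend that itself imports from the package rather than from the defining module can hit a partially initialized `camel.models` and fail with an ImportError. The pattern in miniature:

```python
# Inside a backend module such as siliconflow_model.py:

# Fragile: executes camel/models/__init__.py, which imports the other
# backends (now including aiml_model) and may not have finished binding
# names if this module is itself being imported during that package init.
# from camel.models import BaseModelBackend

# Robust: bind directly to the defining module, bypassing the package init.
from camel.models.base_model import BaseModelBackend
```
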

camel/societies/workforce/role_playing_worker.py CHANGED

@@ -168,11 +168,9 @@ class RolePlayingWorker(Worker):
             chat_history=chat_history_str,
             additional_info=task.additional_info,
         )
-        req = BaseMessage.make_user_message(
-            role_name="User",
-            content=prompt,
+        response = self.summarize_agent.step(
+            prompt, response_format=TaskResult
         )
-        response = self.summarize_agent.step(req, response_format=TaskResult)
         result_dict = json.loads(response.msg.content)
         task_result = TaskResult(**result_dict)
         task.result = task_result.content

camel/societies/workforce/single_agent_worker.py CHANGED

@@ -19,7 +19,6 @@ from typing import Any, List
 from colorama import Fore

 from camel.agents import ChatAgent
-from camel.messages.base import BaseMessage
 from camel.societies.workforce.prompts import PROCESS_TASK_PROMPT
 from camel.societies.workforce.utils import TaskResult
 from camel.societies.workforce.worker import Worker
@@ -72,12 +71,8 @@ class SingleAgentWorker(Worker):
             dependency_tasks_info=dependency_tasks_info,
             additional_info=task.additional_info,
         )
-        req = BaseMessage.make_user_message(
-            role_name="User",
-            content=prompt,
-        )
         try:
-            response = self.worker.step(req, response_format=TaskResult)
+            response = self.worker.step(prompt, response_format=TaskResult)
         except Exception as e:
             print(
                 f"{Fore.RED}Error occurred while processing task {task.id}:"

camel/societies/workforce/workforce.py CHANGED

@@ -281,13 +281,9 @@ class Workforce(BaseNode):
             child_nodes_info=self._get_child_nodes_info(),
             additional_info=task.additional_info,
         )
-        req = BaseMessage.make_user_message(
-            role_name="User",
-            content=prompt,
-        )

         response = self.coordinator_agent.step(
-            req, response_format=TaskAssignResult
+            prompt, response_format=TaskAssignResult
         )
         result_dict = json.loads(response.msg.content)
         task_assign_result = TaskAssignResult(**result_dict)
@@ -315,11 +311,9 @@ class Workforce(BaseNode):
             child_nodes_info=self._get_child_nodes_info(),
             additional_info=task.additional_info,
         )
-        req = BaseMessage.make_user_message(
-            role_name="User",
-            content=prompt,
+        response = self.coordinator_agent.step(
+            prompt, response_format=WorkerConf
         )
-        response = self.coordinator_agent.step(req, response_format=WorkerConf)
         result_dict = json.loads(response.msg.content)
         new_node_conf = WorkerConf(**result_dict)

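All of these workforce call sites now pass the prompt string straight to `step`, relying on `ChatAgent.step` wrapping a plain `str` into a user message internally, which removes the `BaseMessage.make_user_message` boilerplate. A hedged sketch of the pattern; the `TaskResult` schema here is a one-field mirror of `camel.societies.workforce.utils.TaskResult` for illustration:

```python
import json

from pydantic import BaseModel

from camel.agents import ChatAgent


class TaskResult(BaseModel):
    # Reduced mirror of the workforce TaskResult schema, for illustration.
    content: str


agent = ChatAgent("You summarize task outcomes.")
response = agent.step(
    "Summarize: all unit tests passed.", response_format=TaskResult
)
# The structured reply is parsed exactly as the workforce code does above.
result = TaskResult(**json.loads(response.msg.content))
print(result.content)
```
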
camel/toolkits/__init__.py CHANGED

@@ -45,6 +45,7 @@ from .human_toolkit import HumanToolkit
 from .stripe_toolkit import StripeToolkit
 from .video_toolkit import VideoDownloaderToolkit
 from .dappier_toolkit import DappierToolkit
+from .sympy_toolkit import SymPyToolkit
 from .semantic_scholar_toolkit import SemanticScholarToolkit

 __all__ = [
@@ -78,5 +79,6 @@ __all__ = [
     'MeshyToolkit',
     'OpenBBToolkit',
     'DappierToolkit',
+    'SymPyToolkit',
     'SemanticScholarToolkit',
 ]
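
The diff only registers the new toolkit; assuming `SymPyToolkit` follows the usual `BaseToolkit` convention of exposing `get_tools()`, wiring it into an agent would look like this sketch:

```python
from camel.agents import ChatAgent
from camel.toolkits import SymPyToolkit

agent = ChatAgent(
    "You are a math assistant.",
    tools=SymPyToolkit().get_tools(),  # assumes the BaseToolkit convention
)
response = agent.step("Simplify (x**2 - 1)/(x - 1).")
print(response.msg.content)
```
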

camel/toolkits/reddit_toolkit.py CHANGED

@@ -16,10 +16,9 @@ import os
 import time
 from typing import Any, Dict, List, Union

-from requests.exceptions import RequestException
-
 from camel.toolkits import FunctionTool
 from camel.toolkits.base import BaseToolkit
+from camel.utils import retry_on_error


 class RedditToolkit(BaseToolkit):
@@ -61,30 +60,7 @@ class RedditToolkit(BaseToolkit):
             request_timeout=30,  # Set a timeout to handle delays
         )

-    def _retry_request(self, func, *args, **kwargs):
-        r"""Retries a function in case of network-related errors.
-
-        Args:
-            func (callable): The function to be retried.
-            *args: Arguments to pass to the function.
-            **kwargs: Keyword arguments to pass to the function.
-
-        Returns:
-            Any: The result of the function call if successful.
-
-        Raises:
-            RequestException: If all retry attempts fail.
-        """
-        for attempt in range(self.retries):
-            try:
-                return func(*args, **kwargs)
-            except RequestException as e:
-                print(f"Attempt {attempt + 1}/{self.retries} failed: {e}")
-                if attempt < self.retries - 1:
-                    time.sleep(self.delay)
-                else:
-                    raise
-
+    @retry_on_error()
     def collect_top_posts(
         self,
         subreddit_name: str,
@@ -113,8 +89,8 @@ class RedditToolkit(BaseToolkit):
                 "Please set the environment variables."
             )

-        subreddit = self._retry_request(self.reddit.subreddit, subreddit_name)
-        top_posts = self._retry_request(subreddit.top, limit=post_limit)
+        subreddit = self.reddit.subreddit(subreddit_name)
+        top_posts = subreddit.top(limit=post_limit)
         data = []

         for post in top_posts:
@@ -122,9 +98,7 @@ class RedditToolkit(BaseToolkit):
                 "Post Title": post.title,
                 "Comments": [
                     {"Comment Body": comment.body, "Upvotes": comment.score}
-                    for comment in self._retry_request(
-                        lambda post=post: list(post.comments)
-                    )[:comment_limit]
+                    for comment in list(post.comments)[:comment_limit]
                 ],
             }
             data.append(post_data)
@@ -192,15 +166,11 @@ class RedditToolkit(BaseToolkit):
         data = []

         for subreddit_name in subreddits:
-            subreddit = self._retry_request(
-                self.reddit.subreddit, subreddit_name
-            )
-            top_posts = self._retry_request(subreddit.top, limit=post_limit)
+            subreddit = self.reddit.subreddit(subreddit_name)
+            top_posts = subreddit.top(limit=post_limit)

             for post in top_posts:
-                for comment in self._retry_request(
-                    lambda post=post: list(post.comments)
-                )[:comment_limit]:
+                for comment in list(post.comments)[:comment_limit]:
                     # Print comment body for debugging
                     if any(
                         keyword.lower() in comment.body.lower()
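
The hand-rolled `_retry_request` helper is replaced by a shared `retry_on_error` decorator from `camel.utils`, applied once per public method instead of wrapped around every individual call. Its implementation is not shown in this diff; below is a minimal decorator sketch with equivalent retry-and-sleep behavior (the parameter names and backoff policy are assumptions, not camel's actual code):

```python
import functools
import time


def retry_on_error(max_retries: int = 3, initial_delay: float = 1.0):
    """Retry the wrapped function on any exception, sleeping with
    exponential backoff between attempts. A sketch, not the camel
    implementation."""

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            delay = initial_delay
            for attempt in range(1, max_retries + 1):
                try:
                    return func(*args, **kwargs)
                except Exception as e:
                    # Re-raise once the retry budget is exhausted.
                    if attempt == max_retries:
                        raise
                    print(f"Attempt {attempt}/{max_retries} failed: {e}")
                    time.sleep(delay)
                    delay *= 2

        return wrapper

    return decorator
```
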