camel-ai 0.2.71a3__py3-none-any.whl → 0.2.71a4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/interpreters/docker_interpreter.py +3 -2
- camel/loaders/base_loader.py +85 -0
- camel/societies/workforce/workforce.py +144 -33
- camel/toolkits/__init__.py +5 -2
- camel/toolkits/craw4ai_toolkit.py +2 -2
- camel/toolkits/file_write_toolkit.py +6 -6
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +9 -3
- camel/toolkits/hybrid_browser_toolkit/unified_analyzer.js +31 -8
- camel/toolkits/note_taking_toolkit.py +90 -0
- camel/toolkits/openai_image_toolkit.py +292 -0
- camel/toolkits/slack_toolkit.py +4 -4
- camel/toolkits/terminal_toolkit.py +223 -73
- camel/utils/mcp_client.py +37 -1
- {camel_ai-0.2.71a3.dist-info → camel_ai-0.2.71a4.dist-info}/METADATA +43 -4
- {camel_ai-0.2.71a3.dist-info → camel_ai-0.2.71a4.dist-info}/RECORD +18 -16
- camel/toolkits/dalle_toolkit.py +0 -175
- {camel_ai-0.2.71a3.dist-info → camel_ai-0.2.71a4.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.71a3.dist-info → camel_ai-0.2.71a4.dist-info}/licenses/LICENSE +0 -0
camel/toolkits/note_taking_toolkit.py
ADDED
@@ -0,0 +1,90 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from pathlib import Path
+from typing import List, Optional
+
+from camel.toolkits.base import BaseToolkit
+from camel.toolkits.function_tool import FunctionTool
+
+
+class NoteTakingToolkit(BaseToolkit):
+    r"""A toolkit for taking notes in a Markdown file.
+
+    This toolkit allows an agent to create, append to, and update a specific
+    Markdown file for note-taking purposes.
+    """
+
+    def __init__(
+        self,
+        note_file_path: str = "notes/notes.md",
+        timeout: Optional[float] = None,
+    ) -> None:
+        r"""Initialize the NoteTakingToolkit.
+
+        Args:
+            note_file_path (str): The path to the note file.
+                (default: :obj:`notes/notes.md`)
+            timeout (Optional[float]): The timeout for the toolkit.
+        """
+        super().__init__(timeout=timeout)
+        self.note_file_path = Path(note_file_path)
+        self.note_file_path.parent.mkdir(parents=True, exist_ok=True)
+
+    def take_note(self, content: str, update: bool = False) -> str:
+        r"""Takes a note and saves it to the note file.
+
+        Args:
+            content (str): The content of the note to be saved.
+            update (bool): If True, the existing note file will be
+                overwritten with the new content. If False, the new content
+                will be appended to the end of the file.
+                (default: :obj:`False`)
+
+        Returns:
+            str: A message indicating the result of the operation.
+        """
+        mode = "w" if update else "a"
+        try:
+            with self.note_file_path.open(mode, encoding="utf-8") as f:
+                f.write(content + "\n")
+            action = "updated" if update else "appended to"
+            return f"Note successfully {action} in {self.note_file_path}."
+        except Exception as e:
+            return f"Error taking note: {e}"
+
+    def read_note(self) -> str:
+        r"""Reads the content of the note file.
+
+        Returns:
+            str: The content of the note file, or an error message if the
+                file cannot be read.
+        """
+        try:
+            if not self.note_file_path.exists():
+                return "Note file does not exist yet."
+            return self.note_file_path.read_text(encoding="utf-8")
+        except Exception as e:
+            return f"Error reading note: {e}"
+
+    def get_tools(self) -> List[FunctionTool]:
+        r"""Return a list of FunctionTool objects representing the functions
+        in the toolkit.
+
+        Returns:
+            List[FunctionTool]: A list of FunctionTool objects.
+        """
+        return [
+            FunctionTool(self.take_note),
+            FunctionTool(self.read_note),
+        ]
camel/toolkits/openai_image_toolkit.py
ADDED
@@ -0,0 +1,292 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import base64
+import os
+import uuid
+from io import BytesIO
+from typing import List, Literal, Optional
+
+from openai import OpenAI
+from PIL import Image
+
+from camel.logger import get_logger
+from camel.toolkits import FunctionTool
+from camel.toolkits.base import BaseToolkit
+from camel.utils import MCPServer, api_keys_required
+
+logger = get_logger(__name__)
+
+
+@MCPServer()
+class OpenAIImageToolkit(BaseToolkit):
+    r"""A class toolkit for image generation using OpenAI's
+    Image Generation API.
+    """
+
+    @api_keys_required(
+        [
+            ("api_key", "OPENAI_API_KEY"),
+        ]
+    )
+    def __init__(
+        self,
+        model: Optional[
+            Literal["gpt-image-1", "dall-e-3", "dall-e-2"]
+        ] = "gpt-image-1",
+        timeout: Optional[float] = None,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        size: Optional[
+            Literal[
+                "256x256",
+                "512x512",
+                "1024x1024",
+                "1536x1024",
+                "1024x1536",
+                "1792x1024",
+                "1024x1792",
+                "auto",
+            ]
+        ] = "1024x1024",
+        quality: Optional[
+            Literal["auto", "low", "medium", "high", "standard", "hd"]
+        ] = "standard",
+        response_format: Optional[Literal["url", "b64_json"]] = "b64_json",
+        n: Optional[int] = 1,
+        background: Optional[
+            Literal["transparent", "opaque", "auto"]
+        ] = "auto",
+        style: Optional[Literal["vivid", "natural"]] = None,
+        image_save_path: Optional[str] = "image_save",
+    ):
+        r"""Initializes a new instance of the OpenAIImageToolkit class.
+
+        Args:
+            api_key (Optional[str]): The API key for authenticating
+                with the OpenAI service. (default: :obj:`None`)
+            url (Optional[str]): The url to the OpenAI service.
+                (default: :obj:`None`)
+            model (Optional[str]): The model to use.
+                (default: :obj:`"dall-e-3"`)
+            timeout (Optional[float]): The timeout value for API requests
+                in seconds. If None, no timeout is applied.
+                (default: :obj:`None`)
+            size (Optional[Literal["256x256", "512x512", "1024x1024",
+                "1536x1024", "1024x1536", "1792x1024", "1024x1792",
+                "auto"]]):
+                The size of the image to generate.
+                (default: :obj:`"1024x1024"`)
+            quality (Optional[Literal["auto", "low", "medium", "high",
+                "standard", "hd"]]):The quality of the image to
+                generate. Different models support different values.
+                (default: :obj:`"standard"`)
+            response_format (Optional[Literal["url", "b64_json"]]):
+                The format of the response.(default: :obj:`"b64_json"`)
+            n (Optional[int]): The number of images to generate.
+                (default: :obj:`1`)
+            background (Optional[Literal["transparent", "opaque", "auto"]]):
+                The background of the image.(default: :obj:`"auto"`)
+            style (Optional[Literal["vivid", "natural"]]): The style of the
+                image.(default: :obj:`None`)
+            image_save_path (Optional[str]): The path to save the generated
+                image.(default: :obj:`"image_save"`)
+        """
+        super().__init__(timeout=timeout)
+        api_key = api_key or os.environ.get("OPENAI_API_KEY")
+        url = url or os.environ.get("OPENAI_API_BASE_URL")
+        self.client = OpenAI(api_key=api_key, base_url=url)
+        self.model = model
+        self.size = size
+        self.quality = quality
+        self.response_format = response_format
+        self.n = n
+        self.background = background
+        self.style = style
+        self.image_save_path: str = image_save_path or "image_save"
+
+    def base64_to_image(self, base64_string: str) -> Optional[Image.Image]:
+        r"""Converts a base64 encoded string into a PIL Image object.
+
+        Args:
+            base64_string (str): The base64 encoded string of the image.
+
+        Returns:
+            Optional[Image.Image]: The PIL Image object or None if conversion
+                fails.
+        """
+        try:
+            # decode the base64 string to get the image data
+            image_data = base64.b64decode(base64_string)
+            # create a memory buffer for the image data
+            image_buffer = BytesIO(image_data)
+            # open the image with PIL
+            image = Image.open(image_buffer)
+            return image
+        except Exception as e:
+            logger.error(
+                f"An error occurred while converting base64 to image: {e}"
+            )
+            return None
+
+    def _build_base_params(self, prompt: str) -> dict:
+        r"""Build base parameters dict for OpenAI API calls.
+
+        Args:
+            prompt (str): The text prompt for the image operation.
+
+        Returns:
+            dict: Parameters dictionary with non-None values.
+        """
+        params = {"prompt": prompt, "model": self.model}
+
+        # basic parameters supported by all models
+        if self.n is not None:
+            params["n"] = self.n  # type: ignore[assignment]
+        if self.size is not None:
+            params["size"] = self.size
+
+        # Model-specific parameter filtering based on model
+        if self.model == "dall-e-2":
+            # dall-e-2 supports: prompt, model, n, size, response_format
+            if self.response_format is not None:
+                params["response_format"] = self.response_format
+
+        elif self.model == "dall-e-3":
+            # dall-e-3 supports: prompt, model, n,
+            # size, quality, response_format, style
+            if self.quality is not None:
+                params["quality"] = self.quality
+            if self.response_format is not None:
+                params["response_format"] = self.response_format
+            if self.style is not None:
+                params["style"] = self.style
+
+        elif self.model == "gpt-image-1":
+            # gpt-image-1 supports: prompt, model, n, size, quality, background
+            # Note: gpt-image-1 seems to default to b64_json response format
+            if self.quality is not None:
+                params["quality"] = self.quality
+            if self.background is not None:
+                params["background"] = self.background
+
+        return params
+
+    def _handle_api_response(
+        self, response, image_name: str, operation: str
+    ) -> str:
+        r"""Handle API response from OpenAI image operations.
+
+        Args:
+            response: The response object from OpenAI API.
+            image_name (str): Name for the saved image file.
+            operation (str): Operation type for success message ("generated").
+
+        Returns:
+            str: Success message with image path/URL or error message.
+        """
+        if response.data is None or len(response.data) == 0:
+            error_msg = "No image data returned from OpenAI API."
+            logger.error(error_msg)
+            return error_msg
+
+        results = []
+
+        for i, image_data in enumerate(response.data):
+            # check if response has URL or base64 data
+            if hasattr(image_data, 'url') and image_data.url:
+                image_url = image_data.url
+                results.append(f"Image URL: {image_url}")
+            elif hasattr(image_data, 'b64_json') and image_data.b64_json:
+                image_b64 = image_data.b64_json
+
+                # Save the image from base64
+                image_bytes = base64.b64decode(image_b64)
+                os.makedirs(self.image_save_path, exist_ok=True)
+
+                # Add index to filename when multiple images
+                if len(response.data) > 1:
+                    filename = f"{image_name}_{i+1}_{uuid.uuid4().hex}.png"
+                else:
+                    filename = f"{image_name}_{uuid.uuid4().hex}.png"
+
+                image_path = os.path.join(self.image_save_path, filename)
+
+                with open(image_path, "wb") as f:
+                    f.write(image_bytes)
+
+                results.append(f"Image saved to {image_path}")
+            else:
+                error_msg = (
+                    f"No valid image data (URL or base64) found in image {i+1}"
+                )
+                logger.error(error_msg)
+                results.append(error_msg)
+
+        if results:
+            count = len(response.data)
+            if count == 1:
+                return f"Image {operation} successfully. {results[0]}"
+            else:
+                return (
+                    f"{count} images {operation} successfully:\n"
+                    + "\n".join(
+                        f" {i+1}. {result}"
+                        for i, result in enumerate(results)
+                    )
+                )
+        else:
+            error_msg = "No valid image data found in any response"
+            logger.error(error_msg)
+            return error_msg
+
+    def generate_image(
+        self,
+        prompt: str,
+        image_name: str = "image",
+    ) -> str:
+        r"""Generate an image using OpenAI's Image Generation models.
+        The generated image will be saved locally (for ``b64_json`` response
+        formats) or an image URL will be returned (for ``url`` response
+        formats).
+
+        Args:
+            prompt (str): The text prompt to generate the image.
+            image_name (str): The name of the image to save.
+                (default: :obj:`"image"`)
+
+        Returns:
+            str: the content of the model response or format of the response.
+        """
+        try:
+            params = self._build_base_params(prompt)
+            response = self.client.images.generate(**params)
+            return self._handle_api_response(response, image_name, "generated")
+        except Exception as e:
+            error_msg = f"An error occurred while generating image: {e}"
+            logger.error(error_msg)
+            return error_msg
+
+    def get_tools(self) -> List[FunctionTool]:
+        r"""Returns a list of FunctionTool objects representing the
+        functions in the toolkit.
+
+        Returns:
+            List[FunctionTool]: A list of FunctionTool objects
+                representing the functions in the toolkit.
+        """
+        return [
+            FunctionTool(self.generate_image),
+            # could add edit_image function later
+        ]
camel/toolkits/slack_toolkit.py
CHANGED
@@ -252,9 +252,6 @@ class SlackToolkit(BaseToolkit):
         Returns:
             str: A confirmation message indicating whether the message was sent
                 successfully or an error message.
-
-        Raises:
-            SlackApiError: If an error occurs while sending the message.
         """
         from slack_sdk.errors import SlackApiError
 
@@ -268,7 +265,10 @@ class SlackToolkit(BaseToolkit):
             response = slack_client.chat_postMessage(
                 channel=channel_id, text=message
             )
-            return
+            return (
+                f"Message: {message} sent successfully, "
+                f"got response: {response}"
+            )
         except SlackApiError as e:
             return f"Error creating conversation: {e.response['error']}"
 
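
The second hunk makes the Slack send path return a confirmation string instead of None. A hedged sketch of what a caller would now observe; the enclosing method name (send_slack_message) and the Slack credential setup are assumptions, since neither is shown in this hunk.

from camel.toolkits.slack_toolkit import SlackToolkit

# Assumes Slack credentials (e.g. SLACK_BOT_TOKEN) are configured, and that the
# diffed body belongs to a send method named send_slack_message (not shown above).
toolkit = SlackToolkit()
result = toolkit.send_slack_message(message="Build finished", channel_id="C0123456789")
# Before this release the call returned None on success; now it returns a string like
# "Message: Build finished sent successfully, got response: ..."
print(result)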