camel-ai 0.2.75a6__py3-none-any.whl → 0.2.76a1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +159 -38
- camel/configs/__init__.py +3 -0
- camel/configs/amd_config.py +70 -0
- camel/interpreters/__init__.py +2 -0
- camel/interpreters/microsandbox_interpreter.py +395 -0
- camel/memories/__init__.py +2 -1
- camel/memories/agent_memories.py +3 -1
- camel/memories/blocks/chat_history_block.py +17 -2
- camel/models/__init__.py +2 -0
- camel/models/amd_model.py +101 -0
- camel/models/model_factory.py +2 -0
- camel/models/openai_model.py +0 -6
- camel/runtimes/daytona_runtime.py +11 -12
- camel/societies/workforce/single_agent_worker.py +44 -38
- camel/storages/object_storages/google_cloud.py +1 -1
- camel/toolkits/__init__.py +14 -5
- camel/toolkits/aci_toolkit.py +45 -0
- camel/toolkits/code_execution.py +28 -1
- camel/toolkits/context_summarizer_toolkit.py +683 -0
- camel/toolkits/{file_write_toolkit.py → file_toolkit.py} +194 -34
- camel/toolkits/function_tool.py +6 -1
- camel/toolkits/hybrid_browser_toolkit/config_loader.py +12 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +19 -2
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +95 -59
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +619 -95
- camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +7 -2
- camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +115 -219
- camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +219 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/types.ts +1 -0
- camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +39 -6
- camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +401 -80
- camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +9 -5
- camel/toolkits/{openai_image_toolkit.py → image_generation_toolkit.py} +98 -31
- camel/toolkits/markitdown_toolkit.py +27 -1
- camel/toolkits/mcp_toolkit.py +39 -14
- camel/toolkits/minimax_mcp_toolkit.py +195 -0
- camel/toolkits/note_taking_toolkit.py +18 -8
- camel/toolkits/terminal_toolkit.py +12 -2
- camel/toolkits/vertex_ai_veo_toolkit.py +590 -0
- camel/toolkits/video_analysis_toolkit.py +16 -10
- camel/toolkits/wechat_official_toolkit.py +483 -0
- camel/types/enums.py +11 -0
- camel/utils/commons.py +2 -0
- camel/utils/context_utils.py +395 -0
- camel/utils/mcp.py +136 -2
- {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76a1.dist-info}/METADATA +6 -3
- {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76a1.dist-info}/RECORD +52 -41
- {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76a1.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76a1.dist-info}/licenses/LICENSE +0 -0
```diff
@@ -92,11 +92,12 @@ class HybridBrowserToolkit(BaseToolkit, RegisteredAgentToolkit):
         user_data_dir: Optional[str] = None,
         stealth: bool = False,
         web_agent_model: Optional[BaseModelBackend] = None,
-        cache_dir: str =
+        cache_dir: Optional[str] = None,
         enabled_tools: Optional[List[str]] = None,
         browser_log_to_file: bool = False,
+        log_dir: Optional[str] = None,
         session_id: Optional[str] = None,
-        default_start_url: str =
+        default_start_url: Optional[str] = None,
         default_timeout: Optional[int] = None,
         short_timeout: Optional[int] = None,
         navigation_timeout: Optional[int] = None,
@@ -144,6 +145,8 @@ class HybridBrowserToolkit(BaseToolkit, RegisteredAgentToolkit):
                 and page loading times.
                 Logs are saved to an auto-generated timestamped file.
                 Defaults to `False`.
+            log_dir (Optional[str]): Custom directory path for log files.
+                If None, defaults to "browser_log". Defaults to `None`.
             session_id (Optional[str]): A unique identifier for this browser
                 session. When multiple HybridBrowserToolkit instances are
                 used
@@ -199,9 +202,10 @@ class HybridBrowserToolkit(BaseToolkit, RegisteredAgentToolkit):
         self._user_data_dir = user_data_dir
         self._stealth = stealth
         self._web_agent_model = web_agent_model
-        self._cache_dir = cache_dir
+        self._cache_dir = cache_dir or "tmp/"
         self._browser_log_to_file = browser_log_to_file
-        self.
+        self._log_dir = log_dir
+        self._default_start_url = default_start_url or "https://google.com/"
         self._session_id = session_id or "default"
         self._viewport_limit = viewport_limit

@@ -237,7 +241,7 @@ class HybridBrowserToolkit(BaseToolkit, RegisteredAgentToolkit):
         # Set up log file if needed
         if self.log_to_file:
             # Create log directory if it doesn't exist
-            log_dir = "browser_log"
+            log_dir = self._log_dir if self._log_dir else "browser_log"
             os.makedirs(log_dir, exist_ok=True)

             timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
```
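A minimal usage sketch (not part of the diff) of the updated `HybridBrowserToolkit` constructor. The import path is assumed from the package layout, and the argument values are illustrative; the fallbacks to `"tmp/"`, `"browser_log"`, and `"https://google.com/"` come from the hunks above.

```python
# Hedged sketch: exercising the newly optional constructor arguments.
from camel.toolkits.hybrid_browser_toolkit import HybridBrowserToolkit

toolkit = HybridBrowserToolkit(
    cache_dir="my_cache/",          # now Optional[str]; falls back to "tmp/" when None
    log_dir="logs/browser",         # new: custom directory for browser log files
    browser_log_to_file=True,       # logs go to a timestamped file under log_dir
    default_start_url="https://example.com/",  # falls back to "https://google.com/" when None
    session_id="session-1",
)
```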
```diff
@@ -15,7 +15,7 @@
 import base64
 import os
 from io import BytesIO
-from typing import List, Literal, Optional, Union
+from typing import ClassVar, List, Literal, Optional, Tuple, Union

 from openai import OpenAI
 from PIL import Image
@@ -29,21 +29,32 @@ logger = get_logger(__name__)


 @MCPServer()
-class
-r"""A class toolkit for image generation using OpenAI
-
-
-
-
-
-
-
-
+class ImageGenToolkit(BaseToolkit):
+    r"""A class toolkit for image generation using Grok and OpenAI models."""
+
+    GROK_MODELS: ClassVar[List[str]] = [
+        "grok-2-image",
+        "grok-2-image-latest",
+        "grok-2-image-1212",
+    ]
+    OPENAI_MODELS: ClassVar[List[str]] = [
+        "gpt-image-1",
+        "dall-e-3",
+        "dall-e-2",
+    ]
+
     def __init__(
         self,
         model: Optional[
-            Literal[
-
+            Literal[
+                "gpt-image-1",
+                "dall-e-3",
+                "dall-e-2",
+                "grok-2-image",
+                "grok-2-image-latest",
+                "grok-2-image-1212",
+            ]
+        ] = "dall-e-3",
         timeout: Optional[float] = None,
         api_key: Optional[str] = None,
         url: Optional[str] = None,
@@ -72,12 +83,12 @@ class OpenAIImageToolkit(BaseToolkit):
         # NOTE: Some arguments are set in the constructor to prevent the agent
         # from making invalid API calls with model-specific parameters. For
         # example, the 'style' argument is only supported by 'dall-e-3'.
-        r"""Initializes a new instance of the
+        r"""Initializes a new instance of the ImageGenToolkit class.

         Args:
             api_key (Optional[str]): The API key for authenticating
-                with the
-            url (Optional[str]): The url to the
+                with the image model service. (default: :obj:`None`)
+            url (Optional[str]): The url to the image model service.
                 (default: :obj:`None`)
             model (Optional[str]): The model to use.
                 (default: :obj:`"dall-e-3"`)
@@ -103,9 +114,23 @@ class OpenAIImageToolkit(BaseToolkit):
                 image.(default: :obj:`"image_save"`)
         """
         super().__init__(timeout=timeout)
-
-
-
+        if model not in self.GROK_MODELS + self.OPENAI_MODELS:
+            available_models = sorted(self.OPENAI_MODELS + self.GROK_MODELS)
+            raise ValueError(
+                f"Unsupported model: {model}. "
+                f"Supported models are: {available_models}"
+            )
+
+        # Set default url for Grok models
+        url = "https://api.x.ai/v1" if model in self.GROK_MODELS else url
+
+        api_key, base_url = (
+            self.get_openai_credentials(url, api_key)
+            if model in self.OPENAI_MODELS
+            else self.get_grok_credentials(url, api_key)
+        )
+
+        self.client = OpenAI(api_key=api_key, base_url=base_url)
         self.model = model
         self.size = size
         self.quality = quality
@@ -139,7 +164,7 @@ class OpenAIImageToolkit(BaseToolkit):
         return None

     def _build_base_params(self, prompt: str, n: Optional[int] = None) -> dict:
-        r"""Build base parameters dict for
+        r"""Build base parameters dict for Image Model API calls.

         Args:
             prompt (str): The text prompt for the image operation.
@@ -153,6 +178,10 @@ class OpenAIImageToolkit(BaseToolkit):
         # basic parameters supported by all models
         if n is not None:
             params["n"] = n  # type: ignore[assignment]
+
+        if self.model in self.GROK_MODELS:
+            return params
+
         if self.size is not None:
             params["size"] = self.size

@@ -179,16 +208,18 @@ class OpenAIImageToolkit(BaseToolkit):
             params["quality"] = self.quality
         if self.background is not None:
             params["background"] = self.background
-
         return params

     def _handle_api_response(
-        self,
+        self,
+        response,
+        image_name: Union[str, List[str]],
+        operation: str,
     ) -> str:
-        r"""Handle API response from
+        r"""Handle API response from image operations.

         Args:
-            response: The response object from
+            response: The response object from image model API.
             image_name (Union[str, List[str]]): Name(s) for the saved image
                 file(s). If str, the same name is used for all images (will
                 cause error for multiple images). If list, must have exactly
@@ -198,8 +229,9 @@ class OpenAIImageToolkit(BaseToolkit):
         Returns:
             str: Success message with image path/URL or error message.
         """
+        source = "Grok" if self.model in self.GROK_MODELS else "OpenAI"
         if response.data is None or len(response.data) == 0:
-            error_msg = "No image data returned from
+            error_msg = f"No image data returned from {source} API."
             logger.error(error_msg)
             return error_msg

@@ -283,7 +315,7 @@ class OpenAIImageToolkit(BaseToolkit):
         image_name: Union[str, List[str]] = "image.png",
         n: int = 1,
     ) -> str:
-        r"""Generate an image using
+        r"""Generate an image using image models.
         The generated image will be saved locally (for ``b64_json`` response
         formats) or an image URL will be returned (for ``url`` response
         formats).
@@ -309,15 +341,50 @@ class OpenAIImageToolkit(BaseToolkit):
         logger.error(error_msg)
         return error_msg

+    @api_keys_required([("api_key", "XAI_API_KEY")])
+    def get_grok_credentials(self, url, api_key) -> Tuple[str, str]:  # type: ignore[return-value]
+        r"""Get API credentials for the specified Grok model.
+
+        Args:
+            url (str): The base URL for the Grok API.
+            api_key (str): The API key for the Grok API.
+
+        Returns:
+            tuple: (api_key, base_url)
+        """
+
+        # Get credentials based on model type
+        api_key = api_key or os.getenv("XAI_API_KEY")
+        return api_key, url
+
+    @api_keys_required([("api_key", "OPENAI_API_KEY")])
+    def get_openai_credentials(self, url, api_key) -> Tuple[str, str | None]:  # type: ignore[return-value]
+        r"""Get API credentials for the specified OpenAI model.
+
+        Args:
+            url (str): The base URL for the OpenAI API.
+            api_key (str): The API key for the OpenAI API.
+
+        Returns:
+            Tuple[str, str | None]: (api_key, base_url)
+        """
+
+        api_key = api_key or os.getenv("OPENAI_API_KEY")
+        base_url = url or os.getenv("OPENAI_API_BASE_URL")
+        return api_key, base_url
+
     def get_tools(self) -> List[FunctionTool]:
-        r"""Returns a list of FunctionTool objects representing the
-
+        r"""Returns a list of FunctionTool objects representing the functions
+        in the toolkit.

         Returns:
-            List[FunctionTool]: A list of FunctionTool objects
-
+            List[FunctionTool]: A list of FunctionTool objects representing the
+                functions in the toolkit.
         """
         return [
             FunctionTool(self.generate_image),
-            # could add edit_image function later
         ]
+
+
+# Backward compatibility alias
+OpenAIImageToolkit = ImageGenToolkit
```
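A hedged sketch of the renamed toolkit. The `camel.toolkits` import location and the placeholder API key are assumptions; the Grok routing to `https://api.x.ai/v1`, the skipped OpenAI-only parameters, and the backward-compatibility alias come from the hunks above.

```python
# Hedged sketch: using ImageGenToolkit with a Grok model.
import os
from camel.toolkits import ImageGenToolkit, OpenAIImageToolkit

os.environ.setdefault("XAI_API_KEY", "sk-...")  # hypothetical placeholder key

# Grok models route to https://api.x.ai/v1 and skip OpenAI-only request
# parameters (size/quality/background) when building the payload.
grok_toolkit = ImageGenToolkit(model="grok-2-image")

# The old class name remains usable through the backward-compatibility alias.
assert OpenAIImageToolkit is ImageGenToolkit
```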
```diff
@@ -25,12 +25,38 @@ logger = get_logger(__name__)

 @MCPServer()
 class MarkItDownToolkit(BaseToolkit):
-    r"""A class representing a toolkit for MarkItDown.
+    r"""A class representing a toolkit for MarkItDown.
+
+    .. deprecated::
+        MarkItDownToolkit is deprecated. Use FileToolkit instead, which now
+        includes the same functionality through its read_file method that
+        supports both single files and multiple files.
+
+    Example migration:
+        # Old way
+        from camel.toolkits import MarkItDownToolkit
+        toolkit = MarkItDownToolkit()
+        content = toolkit.read_files(['file1.pdf', 'file2.docx'])
+
+        # New way
+        from camel.toolkits import FileToolkit
+        toolkit = FileToolkit()
+        content = toolkit.read_file(['file1.pdf', 'file2.docx'])
+    """

     def __init__(
         self,
         timeout: Optional[float] = None,
     ):
+        import warnings
+
+        warnings.warn(
+            "MarkItDownToolkit is deprecated and will be removed in a future "
+            "version. Please use FileToolkit instead, which now includes "
+            "read_file method that supports both single and multiple files.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
         super().__init__(timeout=timeout)

     def read_files(self, file_paths: List[str]) -> Dict[str, str]:
```
camel/toolkits/mcp_toolkit.py (CHANGED)

```diff
@@ -220,26 +220,34 @@ class MCPToolkit(BaseToolkit):
         self._exit_stack = AsyncExitStack()

         try:
-            #
-
-
-
-
-
-
-            except Exception as e:
-                logger.error(f"Failed to connect to client {i+1}: {e}")
-                # AsyncExitStack will handle cleanup of already connected
-                await self._exit_stack.aclose()
-                self._exit_stack = None
-                error_msg = f"Failed to connect to client {i+1}: {e}"
-                raise MCPConnectionError(error_msg) from e
+            # Apply timeout to the entire connection process
+            import asyncio
+
+            timeout_seconds = self.timeout or 30.0
+            await asyncio.wait_for(
+                self._connect_all_clients(), timeout=timeout_seconds
+            )

             self._is_connected = True
             msg = f"Successfully connected to {len(self.clients)} MCP servers"
             logger.info(msg)
             return self

+        except (asyncio.TimeoutError, asyncio.CancelledError):
+            self._is_connected = False
+            if self._exit_stack:
+                await self._exit_stack.aclose()
+                self._exit_stack = None
+
+            timeout_seconds = self.timeout or 30.0
+            error_msg = (
+                f"Connection timeout after {timeout_seconds}s. "
+                f"One or more MCP servers are not responding. "
+                f"Please check if the servers are running and accessible."
+            )
+            logger.error(error_msg)
+            raise MCPConnectionError(error_msg)
+
         except Exception:
             self._is_connected = False
             if self._exit_stack:
@@ -247,6 +255,23 @@ class MCPToolkit(BaseToolkit):
             self._exit_stack = None
             raise

+    async def _connect_all_clients(self):
+        r"""Connect to all clients sequentially."""
+        # Connect to all clients using AsyncExitStack
+        for i, client in enumerate(self.clients):
+            try:
+                # Use MCPClient directly as async context manager
+                await self._exit_stack.enter_async_context(client)
+                msg = f"Connected to client {i+1}/{len(self.clients)}"
+                logger.debug(msg)
+            except Exception as e:
+                logger.error(f"Failed to connect to client {i+1}: {e}")
+                # AsyncExitStack will cleanup already connected clients
+                await self._exit_stack.aclose()
+                self._exit_stack = None
+                error_msg = f"Failed to connect to client {i+1}: {e}"
+                raise MCPConnectionError(error_msg) from e
+
     async def disconnect(self):
         r"""Disconnect from all MCP servers."""
         if not self._is_connected:
```
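A hedged sketch of the new connection-timeout behaviour in `MCPToolkit.connect()`. The `config_dict`/`timeout` constructor usage mirrors the MinimaxMCPToolkit file below; the `"some-server"`/`"some-mcp"` entries are hypothetical, and the exact import path of `MCPConnectionError` is not shown in this diff.

```python
# Hedged sketch: connect() now wraps client setup in asyncio.wait_for and
# raises MCPConnectionError when servers do not respond in time.
import asyncio
from camel.toolkits import MCPToolkit

async def main() -> None:
    toolkit = MCPToolkit(
        config_dict={
            "mcpServers": {
                "some-server": {"command": "uvx", "args": ["some-mcp"]},  # hypothetical server
            }
        },
        timeout=10.0,  # connect() waits at most this long (falls back to 30.0)
    )
    try:
        await toolkit.connect()
        print(len(toolkit.get_tools()), "tools available")
    except Exception as exc:  # MCPConnectionError on timeout or a failed client
        print(f"Connection failed: {exc}")
    finally:
        await toolkit.disconnect()

asyncio.run(main())
```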
```diff
@@ -0,0 +1,195 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import os
+from typing import Any, Dict, List, Optional
+
+from camel.toolkits import BaseToolkit, FunctionTool
+
+from .mcp_toolkit import MCPToolkit
+
+
+class MinimaxMCPToolkit(BaseToolkit):
+    r"""MinimaxMCPToolkit provides an interface for interacting with
+    MiniMax AI services using the MiniMax MCP server.
+
+    This toolkit enables access to MiniMax's multimedia generation
+    capabilities including text-to-audio, voice cloning, video generation,
+    image generation, music generation, and voice design.
+
+    This toolkit can be used as an async context manager for automatic
+    connection management:
+
+        # Using explicit API key
+        async with MinimaxMCPToolkit(api_key="your-key") as toolkit:
+            tools = toolkit.get_tools()
+            # Toolkit is automatically disconnected when exiting
+
+        # Using environment variables (recommended for security)
+        # Set MINIMAX_API_KEY=your-key in environment
+        async with MinimaxMCPToolkit() as toolkit:
+            tools = toolkit.get_tools()
+
+    Environment Variables:
+        MINIMAX_API_KEY: MiniMax API key for authentication
+        MINIMAX_API_HOST: API host URL (default: https://api.minimax.io)
+        MINIMAX_MCP_BASE_PATH: Base path for output files
+
+    Attributes:
+        timeout (Optional[float]): Connection timeout in seconds.
+            (default: :obj:`None`)
+    """
+
+    def __init__(
+        self,
+        api_key: Optional[str] = None,
+        api_host: str = "https://api.minimax.io",
+        base_path: Optional[str] = None,
+        timeout: Optional[float] = None,
+    ) -> None:
+        r"""Initializes the MinimaxMCPToolkit.
+
+        Args:
+            api_key (Optional[str]): MiniMax API key for authentication.
+                If None, will attempt to read from MINIMAX_API_KEY
+                environment variable. (default: :obj:`None`)
+            api_host (str): MiniMax API host URL. Can be either
+                "https://api.minimax.io" (global) or
+                "https://api.minimaxi.com" (mainland China).
+                Can also be read from MINIMAX_API_HOST environment variable.
+                (default: :obj:`"https://api.minimax.io"`)
+            base_path (Optional[str]): Base path for output files.
+                If None, uses current working directory. Can also be read
+                from MINIMAX_MCP_BASE_PATH environment variable.
+                (default: :obj:`None`)
+            timeout (Optional[float]): Connection timeout in seconds.
+                (default: :obj:`None`)
+        """
+        super().__init__(timeout=timeout)
+
+        # Read API key from parameter or environment variable
+        if api_key is None:
+            api_key = os.getenv("MINIMAX_API_KEY")
+
+        if not api_key:
+            raise ValueError(
+                "api_key must be provided either as a parameter or through "
+                "the MINIMAX_API_KEY environment variable"
+            )
+
+        # Read API host from environment variable if not overridden
+        env_api_host = os.getenv("MINIMAX_API_HOST")
+        if env_api_host:
+            api_host = env_api_host
+
+        # Read base path from environment variable if not provided
+        if base_path is None:
+            base_path = os.getenv("MINIMAX_MCP_BASE_PATH")
+
+        # Set up environment variables for the MCP server
+        env = {
+            "MINIMAX_API_KEY": api_key,
+            "MINIMAX_API_HOST": api_host,
+        }
+
+        if base_path:
+            env["MINIMAX_MCP_BASE_PATH"] = base_path
+
+        self._mcp_toolkit = MCPToolkit(
+            config_dict={
+                "mcpServers": {
+                    "minimax": {
+                        "command": "uvx",
+                        "args": ["minimax-mcp", "-y"],
+                        "env": env,
+                    }
+                }
+            },
+            timeout=timeout,
+        )
+
+    async def connect(self):
+        r"""Explicitly connect to the MiniMax MCP server."""
+        await self._mcp_toolkit.connect()
+
+    async def disconnect(self):
+        r"""Explicitly disconnect from the MiniMax MCP server."""
+        await self._mcp_toolkit.disconnect()
+
+    @property
+    def is_connected(self) -> bool:
+        r"""Check if the toolkit is connected to the MCP server.
+
+        Returns:
+            bool: True if connected, False otherwise.
+        """
+        return self._mcp_toolkit.is_connected
+
+    async def __aenter__(self) -> "MinimaxMCPToolkit":
+        r"""Async context manager entry point.
+
+        Returns:
+            MinimaxMCPToolkit: The connected toolkit instance.
+
+        Example:
+            async with MinimaxMCPToolkit(api_key="your-key") as toolkit:
+                tools = toolkit.get_tools()
+        """
+        await self.connect()
+        return self
+
+    async def __aexit__(self, _exc_type, _exc_val, _exc_tb) -> None:
+        r"""Async context manager exit point.
+
+        Automatically disconnects from the MiniMax MCP server.
+        """
+        await self.disconnect()
+
+    def get_tools(self) -> List[FunctionTool]:
+        r"""Returns a list of tools provided by the MiniMax MCP server.
+
+        This includes tools for:
+        - Text-to-audio conversion
+        - Voice cloning
+        - Video generation
+        - Image generation
+        - Music generation
+        - Voice design
+
+        Returns:
+            List[FunctionTool]: List of available MiniMax AI tools.
+        """
+        return self._mcp_toolkit.get_tools()
+
+    def get_text_tools(self) -> str:
+        r"""Returns a string containing the descriptions of the tools.
+
+        Returns:
+            str: A string containing the descriptions of all MiniMax tools.
+        """
+        return self._mcp_toolkit.get_text_tools()
+
+    async def call_tool(
+        self, tool_name: str, tool_args: Dict[str, Any]
+    ) -> Any:
+        r"""Call a MiniMax tool by name.
+
+        Args:
+            tool_name (str): Name of the tool to call.
+            tool_args (Dict[str, Any]): Arguments to pass to the tool.
+
+        Returns:
+            Any: The result of the tool call.
+        """
+        return await self._mcp_toolkit.call_tool(tool_name, tool_args)
```
```diff
@@ -138,33 +138,43 @@ class NoteTakingToolkit(BaseToolkit):
         self.registry.append(note_name)
         self._save_registry()

-    def create_note(
+    def create_note(
+        self, note_name: str, content: str, overwrite: bool = False
+    ) -> str:
         r"""Creates a new note with a unique name.

         This function will create a new file for your note.
-
-        to add content to an existing note, use the `append_note`
-        instead.
+        By default, you must provide a `note_name` that does not already exist.
+        If you want to add content to an existing note, use the `append_note`
+        function instead. If you want to overwrite an existing note, set
+        `overwrite=True`.

         Args:
             note_name (str): The name for your new note (without the .md
-                extension). This name must be unique.
+                extension). This name must be unique unless overwrite is True.
             content (str): The initial content to write in the note.
+            overwrite (bool): Whether to overwrite an existing note.
+                Defaults to False.

         Returns:
             str: A message confirming the creation of the note or an error if
-                the note name is not valid or already exists
+                the note name is not valid or already exists
+                (when overwrite=False).
         """
         try:
             note_path = self.working_directory / f"{note_name}.md"
+            existed_before = note_path.exists()

-            if
+            if existed_before and not overwrite:
                 return f"Error: Note '{note_name}.md' already exists."

             note_path.write_text(content, encoding="utf-8")
             self._register_note(note_name)

-
+            if existed_before and overwrite:
+                return f"Note '{note_name}.md' successfully overwritten."
+            else:
+                return f"Note '{note_name}.md' successfully created."
         except Exception as e:
             return f"Error creating note: {e}"

```
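A hedged sketch of the new `overwrite` flag on `NoteTakingToolkit.create_note`. The no-argument constructor is an assumption, since the hunk above only covers `create_note` itself.

```python
# Hedged sketch: overwrite behaviour of create_note.
from camel.toolkits import NoteTakingToolkit

notes = NoteTakingToolkit()  # constructor arguments assumed; not shown in the diff
print(notes.create_note("plan", "v1"))                  # creates plan.md
print(notes.create_note("plan", "v2"))                  # error: already exists
print(notes.create_note("plan", "v2", overwrite=True))  # overwrites plan.md
```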
```diff
@@ -66,6 +66,9 @@ class TerminalToolkit(BaseToolkit):
             connecting them to the terminal's standard input. This is useful
             for commands that require user input, like `ssh`. Interactive mode
             is only supported on macOS and Linux. (default: :obj:`False`)
+        log_dir (Optional[str]): Custom directory path for log files.
+            If None, logs are saved to the current working directory.
+            (default: :obj:`None`)

     Note:
         Most functions are compatible with Unix-based systems (macOS, Linux).
@@ -83,6 +86,7 @@ class TerminalToolkit(BaseToolkit):
         clone_current_env: bool = False,
         safe_mode: bool = True,
         interactive: bool = False,
+        log_dir: Optional[str] = None,
     ):
         # Store timeout before calling super().__init__
         self._timeout = timeout
@@ -99,6 +103,7 @@ class TerminalToolkit(BaseToolkit):
         self.use_shell_mode = use_shell_mode
         self._human_takeover_active = False
         self.interactive = interactive
+        self.log_dir = log_dir

         self.python_executable = sys.executable
         self.is_macos = platform.system() == 'Darwin'
@@ -153,8 +158,13 @@ class TerminalToolkit(BaseToolkit):
         r"""Set up file output to replace GUI, using a fixed file to simulate
         terminal.
         """
-
-        self.
+        # Use custom log directory if provided, otherwise use current directory
+        if self.log_dir:
+            # Create the log directory if it doesn't exist
+            os.makedirs(self.log_dir, exist_ok=True)
+            self.log_file = os.path.join(self.log_dir, "camel_terminal.txt")
+        else:
+            self.log_file = os.path.join(os.getcwd(), "camel_terminal.txt")

         # Inform the user
         logger.info(f"Terminal output will be redirected to: {self.log_file}")
```