camel-ai 0.2.75a6__py3-none-any.whl → 0.2.76a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +151 -37
- camel/configs/__init__.py +3 -0
- camel/configs/amd_config.py +70 -0
- camel/interpreters/__init__.py +2 -0
- camel/interpreters/microsandbox_interpreter.py +395 -0
- camel/models/__init__.py +2 -0
- camel/models/amd_model.py +101 -0
- camel/models/model_factory.py +2 -0
- camel/models/openai_model.py +0 -6
- camel/runtimes/daytona_runtime.py +11 -12
- camel/toolkits/__init__.py +5 -3
- camel/toolkits/code_execution.py +28 -1
- camel/toolkits/function_tool.py +6 -1
- camel/toolkits/hybrid_browser_toolkit/config_loader.py +8 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +12 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +33 -14
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +135 -40
- camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +2 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +43 -207
- camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +231 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
- camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +39 -6
- camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +241 -56
- camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +5 -1
- camel/toolkits/{openai_image_toolkit.py → image_generation_toolkit.py} +98 -31
- camel/toolkits/mcp_toolkit.py +39 -14
- camel/toolkits/minimax_mcp_toolkit.py +195 -0
- camel/toolkits/terminal_toolkit.py +12 -2
- camel/toolkits/video_analysis_toolkit.py +16 -10
- camel/types/enums.py +11 -0
- camel/utils/commons.py +2 -0
- camel/utils/mcp.py +136 -2
- {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76a0.dist-info}/METADATA +5 -3
- {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76a0.dist-info}/RECORD +38 -31
- {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76a0.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76a0.dist-info}/licenses/LICENSE +0 -0
camel/interpreters/microsandbox_interpreter.py
ADDED
@@ -0,0 +1,395 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
import asyncio
from typing import Any, ClassVar, Dict, List, Optional, Tuple, Union

from camel.interpreters.base import BaseInterpreter
from camel.interpreters.interpreter_error import InterpreterError
from camel.logger import get_logger

logger = get_logger(__name__)


class MicrosandboxInterpreter(BaseInterpreter):
    r"""Microsandbox Code Interpreter implementation.

    This interpreter provides secure code execution using microsandbox,
    a self-hosted platform for secure execution of untrusted user/AI code.
    It supports Python code execution via PythonSandbox, JavaScript/Node.js
    code execution via NodeSandbox, and shell commands via the command
    interface.

    Args:
        require_confirm (bool, optional): If True, prompt user before running
            code strings for security. (default: :obj:`True`)
        server_url (str, optional): URL of the microsandbox server. If not
            provided, will use MSB_SERVER_URL environment variable, then
            fall back to http://127.0.0.1:5555. (default: :obj:`None`)
        api_key (str, optional): API key for microsandbox authentication.
            If not provided, will use MSB_API_KEY environment variable.
            (default: :obj:`None`)
        namespace (str, optional): Namespace for the sandbox.
            (default: :obj:`"default"`)
        sandbox_name (str, optional): Name of the sandbox instance. If not
            provided, a random name will be generated by the SDK.
            (default: :obj:`None`)
        timeout (int, optional): Default timeout for code execution in
            seconds. (default: :obj:`30`)

    Environment Variables:
        MSB_SERVER_URL: URL of the microsandbox server.
        MSB_API_KEY: API key for microsandbox authentication.

    Note:
        The SDK handles parameter priority as: user parameter > environment
        variable > default value.
    """

    _CODE_TYPE_MAPPING: ClassVar[Dict[str, str]] = {
        # Python code - uses PythonSandbox
        "python": "python_sandbox",
        "py3": "python_sandbox",
        "python3": "python_sandbox",
        "py": "python_sandbox",
        # JavaScript/Node.js code - uses NodeSandbox
        "javascript": "node_sandbox",
        "js": "node_sandbox",
        "node": "node_sandbox",
        "typescript": "node_sandbox",
        "ts": "node_sandbox",
        # Shell commands - uses command.run()
        "bash": "shell_command",
        "shell": "shell_command",
        "sh": "shell_command",
    }

    def __init__(
        self,
        require_confirm: bool = True,
        server_url: Optional[str] = None,
        api_key: Optional[str] = None,
        namespace: str = "default",
        sandbox_name: Optional[str] = None,
        timeout: int = 30,
    ) -> None:
        from microsandbox import (
            NodeSandbox,
            PythonSandbox,
        )

        # Store parameters, let SDK handle defaults and environment variables
        self.require_confirm = require_confirm
        self.server_url = server_url  # None means use SDK default logic
        self.api_key = api_key  # None means use SDK default logic
        self.namespace = namespace
        self.sandbox_name = (
            sandbox_name  # None means SDK generates random name
        )
        self.timeout = timeout

        # Store sandbox configuration
        self._sandbox_config = {
            "server_url": self.server_url,
            "namespace": self.namespace,
            "name": self.sandbox_name,
            "api_key": self.api_key,
        }

        # Store sandbox classes for reuse
        self._PythonSandbox = PythonSandbox
        self._NodeSandbox = NodeSandbox

        # Log initialization info
        logger.info("Initialized MicrosandboxInterpreter")
        logger.info(f"Namespace: {self.namespace}")
        if self.sandbox_name:
            logger.info(f"Sandbox name: {self.sandbox_name}")
        else:
            logger.info("Sandbox name: will be auto-generated by SDK")

    def run(
        self,
        code: str,
        code_type: str = "python",
    ) -> str:
        r"""Executes the given code in the microsandbox.

        Args:
            code (str): The code string to execute.
            code_type (str): The type of code to execute. Supported types:
                'python', 'javascript', 'bash'. (default: :obj:`python`)

        Returns:
            str: The string representation of the output of the executed code.

        Raises:
            InterpreterError: If the `code_type` is not supported or if any
                runtime error occurs during the execution of the code.
        """
        if code_type not in self._CODE_TYPE_MAPPING:
            raise InterpreterError(
                f"Unsupported code type {code_type}. "
                f"`{self.__class__.__name__}` only supports "
                f"{', '.join(list(self._CODE_TYPE_MAPPING.keys()))}."
            )

        # Print code for security checking
        if self.require_confirm:
            logger.info(
                f"The following {code_type} code will run on "
                f"microsandbox: {code}"
            )
            self._confirm_execution("code")

        # Run the code asynchronously
        return asyncio.run(self._run_async(code, code_type))

    async def _run_async(self, code: str, code_type: str) -> str:
        r"""Asynchronously executes code in microsandbox.

        Args:
            code (str): The code to execute.
            code_type (str): The type of code to execute.

        Returns:
            str: The output of the executed code.

        Raises:
            InterpreterError: If execution fails.
        """
        try:
            execution_method = self._CODE_TYPE_MAPPING[code_type]

            if execution_method == "python_sandbox":
                return await self._run_python_code(code)
            elif execution_method == "node_sandbox":
                return await self._run_node_code(code)
            elif execution_method == "shell_command":
                return await self._run_shell_command(code)
            else:
                raise InterpreterError(
                    f"Unsupported execution method: {execution_method}"
                )

        except Exception as e:
            raise InterpreterError(
                f"Error executing code in microsandbox: {e}"
            )

    async def _run_python_code(self, code: str) -> str:
        r"""Execute Python code using PythonSandbox.

        Args:
            code (str): Python code to execute.

        Returns:
            str: Execution output.
        """
        async with self._PythonSandbox.create(
            **self._sandbox_config
        ) as sandbox:
            execution = await asyncio.wait_for(
                sandbox.run(code), timeout=self.timeout
            )
            return await self._get_execution_output(execution)

    async def _run_node_code(self, code: str) -> str:
        r"""Execute JavaScript/Node.js code using NodeSandbox.

        Args:
            code (str): JavaScript/Node.js code to execute.

        Returns:
            str: Execution output.
        """
        async with self._NodeSandbox.create(**self._sandbox_config) as sandbox:
            execution = await asyncio.wait_for(
                sandbox.run(code), timeout=self.timeout
            )
            return await self._get_execution_output(execution)

    async def _run_shell_command(self, code: str) -> str:
        r"""Execute shell commands directly.

        Args:
            code (str): Shell command to execute.

        Returns:
            str: Command execution output.
        """
        # Use any sandbox for shell commands
        async with self._PythonSandbox.create(
            **self._sandbox_config
        ) as sandbox:
            execution = await asyncio.wait_for(
                sandbox.command.run("bash", ["-c", code]), timeout=self.timeout
            )
            return await self._get_command_output(execution)

    async def _get_execution_output(self, execution) -> str:
        r"""Get output from code execution.

        Args:
            execution: Execution object from sandbox.run().

        Returns:
            str: Formatted execution output.
        """
        output = await execution.output()
        error = await execution.error()

        result_parts = []
        if output and output.strip():
            result_parts.append(output.strip())
        if error and error.strip():
            result_parts.append(f"STDERR: {error.strip()}")

        return (
            "\n".join(result_parts)
            if result_parts
            else "Code executed successfully (no output)"
        )

    async def _get_command_output(self, execution) -> str:
        r"""Get output from command execution.

        Args:
            execution: CommandExecution object from sandbox.command.run().

        Returns:
            str: Formatted command output.
        """
        output = await execution.output()
        error = await execution.error()

        result_parts = []
        if output and output.strip():
            result_parts.append(output.strip())
        if error and error.strip():
            result_parts.append(f"STDERR: {error.strip()}")
        if hasattr(execution, 'exit_code') and execution.exit_code != 0:
            result_parts.append(f"Exit code: {execution.exit_code}")

        return (
            "\n".join(result_parts)
            if result_parts
            else "Command executed successfully (no output)"
        )

    def _confirm_execution(self, execution_type: str) -> None:
        r"""Prompt user for confirmation before executing code or commands.

        Args:
            execution_type (str): Type of execution ('code' or 'command').

        Raises:
            InterpreterError: If user declines to run the code/command.
        """
        while True:
            choice = input(f"Running {execution_type}? [Y/n]:").lower()
            if choice in ["y", "yes", "ye"]:
                break
            elif choice not in ["no", "n"]:
                continue
            raise InterpreterError(
                f"Execution halted: User opted not to run the "
                f"{execution_type}. "
                f"This choice stops the current operation and any "
                f"further {execution_type} execution."
            )

    def supported_code_types(self) -> List[str]:
        r"""Provides supported code types by the interpreter."""
        return list(self._CODE_TYPE_MAPPING.keys())

    def update_action_space(self, action_space: Dict[str, Any]) -> None:
        r"""Updates action space for interpreter.

        Args:
            action_space: Action space dictionary (unused in microsandbox).

        Note:
            Microsandbox doesn't support action space updates as it runs
            in isolated environments for each execution.
        """
        # Explicitly acknowledge the parameter to avoid linting warnings
        _ = action_space
        logger.warning(
            "Microsandbox doesn't support action space updates. "
            "Code runs in isolated environments for each execution."
        )

    def execute_command(self, command: str) -> Union[str, Tuple[str, str]]:
        r"""Execute a shell command in the microsandbox.

        This method is designed for package management and system
        administration tasks. It executes shell commands directly
        using the microsandbox command interface.

        Args:
            command (str): The shell command to execute (e.g.,
                "pip install numpy", "ls -la", "apt-get update").

        Returns:
            Union[str, Tuple[str, str]]: The output of the command.

        Examples:
            >>> interpreter.execute_command("pip install numpy")
            >>> interpreter.execute_command("npm install express")
            >>> interpreter.execute_command("ls -la /tmp")
        """
        # Print command for security checking
        if self.require_confirm:
            logger.info(
                f"The following shell command will run on "
                f"microsandbox: {command}"
            )
            self._confirm_execution("command")

        return asyncio.run(self._execute_command_async(command))

    async def _execute_command_async(self, command: str) -> str:
        r"""Asynchronously executes a shell command in microsandbox.

        Args:
            command (str): The shell command to execute.

        Returns:
            str: The output of the command execution.

        Raises:
            InterpreterError: If execution fails.
        """
        try:
            async with self._PythonSandbox.create(
                **self._sandbox_config
            ) as sandbox:
                execution = await asyncio.wait_for(
                    sandbox.command.run("bash", ["-c", command]),
                    timeout=self.timeout,
                )
                return await self._get_command_output(execution)

        except Exception as e:
            raise InterpreterError(
                f"Error executing command in microsandbox: {e}"
            )

    def __del__(self) -> None:
        r"""Destructor for the MicrosandboxInterpreter class.

        Microsandbox uses context managers for resource management,
        so no explicit cleanup is needed.
        """
        logger.debug("MicrosandboxInterpreter cleaned up")
camel/models/__init__.py
CHANGED
@@ -12,6 +12,7 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from .aiml_model import AIMLModel
+from .amd_model import AMDModel
 from .anthropic_model import AnthropicModel
 from .aws_bedrock_model import AWSBedrockModel
 from .azure_openai_model import AzureOpenAIModel
@@ -62,6 +63,7 @@ __all__ = [
     'OpenRouterModel',
     'AzureOpenAIModel',
     'AnthropicModel',
+    'AMDModel',
     'MistralModel',
     'GroqModel',
     'StubModel',
camel/models/amd_model.py
ADDED
@@ -0,0 +1,101 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========

import os
from typing import Any, Dict, Optional, Union

from camel.configs import AMD_API_PARAMS, AMDConfig
from camel.models.openai_compatible_model import OpenAICompatibleModel
from camel.types import ModelType
from camel.utils import BaseTokenCounter, api_keys_required


class AMDModel(OpenAICompatibleModel):
    r"""AMD API in a unified OpenAICompatibleModel interface.

    Args:
        model_type (Union[ModelType, str]): Model for which a backend is
            created, one of AMD series.
        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
            that will be fed into:obj:`openai.ChatCompletion.create()`. If
            :obj:`None`, :obj:`AMDConfig().as_dict()` will be used.
            (default: :obj:`None`)
        api_key (Optional[str], optional): The API key for authenticating with
            the AMD service. (default: :obj:`None`)
        url (Optional[str], optional): The url to the AMD service.
            (default: :obj:`None`)
        token_counter (Optional[BaseTokenCounter], optional): Token counter to
            use for the model. If not provided, :obj:`OpenAITokenCounter(
            ModelType.GPT_4)` will be used.
            (default: :obj:`None`)
        timeout (Optional[float], optional): The timeout value in seconds for
            API calls. If not provided, will fall back to the MODEL_TIMEOUT
            environment variable or default to 180 seconds.
            (default: :obj:`None`)
        max_retries (int, optional): Maximum number of retries for API calls.
            (default: :obj:`3`)
        **kwargs (Any): Additional arguments to pass to the client
            initialization.
    """

    @api_keys_required(
        [
            ("api_key", "AMD_API_KEY"),
        ]
    )
    def __init__(
        self,
        model_type: Union[ModelType, str],
        model_config_dict: Optional[Dict[str, Any]] = None,
        api_key: Optional[str] = None,
        url: Optional[str] = None,
        token_counter: Optional[BaseTokenCounter] = None,
        timeout: Optional[float] = None,
        max_retries: int = 3,
        **kwargs: Any,
    ) -> None:
        if model_config_dict is None:
            model_config_dict = AMDConfig().as_dict()
        api_key = api_key or os.environ.get("AMD_API_KEY")
        url = url or os.environ.get(
            "AMD_API_BASE_URL", "https://llm-api.amd.com"
        )
        headers = {'Ocp-Apim-Subscription-Key': api_key}
        kwargs["default_headers"] = headers
        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
        super().__init__(
            model_type=model_type,
            model_config_dict=model_config_dict,
            api_key=api_key,
            url=url,
            token_counter=token_counter,
            timeout=timeout,
            max_retries=max_retries,
            **kwargs,
        )

    def check_model_config(self):
        r"""Check whether the model configuration contains any
        unexpected arguments to AMD API.

        Raises:
            ValueError: If the model configuration dictionary contains any
                unexpected arguments to AMD API.
        """
        for param in self.model_config_dict:
            if param not in AMD_API_PARAMS:
                raise ValueError(
                    f"Unexpected argument `{param}` is "
                    "input into AMD model backend."
                )
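
A hedged construction sketch based on the signature above: the AMD_API_KEY environment variable is required by the api_keys_required decorator, and the model name string below is a placeholder, not a value taken from this release.

import os

from camel.models import AMDModel

# Assumption: AMD_API_KEY is set in the environment (or passed as api_key).
os.environ.setdefault("AMD_API_KEY", "<subscription-key>")

model = AMDModel(
    model_type="placeholder-amd-model",  # placeholder model name
    timeout=60,
)
model.check_model_config()  # raises ValueError on unexpected config keys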
camel/models/model_factory.py
CHANGED
@@ -16,6 +16,7 @@ import os
 from typing import ClassVar, Dict, Optional, Type, Union

 from camel.models.aiml_model import AIMLModel
+from camel.models.amd_model import AMDModel
 from camel.models.anthropic_model import AnthropicModel
 from camel.models.aws_bedrock_model import AWSBedrockModel
 from camel.models.azure_openai_model import AzureOpenAIModel
@@ -77,6 +78,7 @@ class ModelFactory:
         ModelPlatformType.AWS_BEDROCK: AWSBedrockModel,
         ModelPlatformType.NVIDIA: NvidiaModel,
         ModelPlatformType.SILICONFLOW: SiliconFlowModel,
+        ModelPlatformType.AMD: AMDModel,
         ModelPlatformType.AIML: AIMLModel,
         ModelPlatformType.VOLCANO: VolcanoModel,
         ModelPlatformType.NETMIND: NetmindModel,
camel/models/openai_model.py
CHANGED
@@ -303,9 +303,6 @@ class OpenAIModel(BaseModelBackend):
         is_streaming = self.model_config_dict.get("stream", False)

         if response_format:
-            result: Union[ChatCompletion, Stream[ChatCompletionChunk]] = (
-                self._request_parse(messages, response_format, tools)
-            )
             if is_streaming:
                 # Use streaming parse for structured output
                 return self._request_stream_parse(
@@ -377,9 +374,6 @@ class OpenAIModel(BaseModelBackend):
         is_streaming = self.model_config_dict.get("stream", False)

         if response_format:
-            result: Union[
-                ChatCompletion, AsyncStream[ChatCompletionChunk]
-            ] = await self._arequest_parse(messages, response_format, tools)
             if is_streaming:
                 # Use streaming parse for structured output
                 return await self._arequest_stream_parse(
camel/runtimes/daytona_runtime.py
CHANGED
@@ -16,7 +16,7 @@ import inspect
 import json
 import os
 from functools import wraps
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Callable, Dict, List, Optional, Union

 from pydantic import BaseModel

@@ -49,7 +49,7 @@ class DaytonaRuntime(BaseRuntime):
         api_url: Optional[str] = None,
         language: Optional[str] = "python",
     ):
-        from daytona_sdk import Daytona, DaytonaConfig
+        from daytona_sdk import Daytona, DaytonaConfig, Sandbox

         super().__init__()
         self.api_key = api_key or os.environ.get('DAYTONA_API_KEY')
@@ -57,7 +57,7 @@ class DaytonaRuntime(BaseRuntime):
         self.language = language
         self.config = DaytonaConfig(api_key=self.api_key, api_url=self.api_url)
         self.daytona = Daytona(self.config)
-        self.sandbox = None
+        self.sandbox: Optional[Sandbox] = None
         self.entrypoint: Dict[str, str] = dict()

     def build(self) -> "DaytonaRuntime":
@@ -66,10 +66,10 @@ class DaytonaRuntime(BaseRuntime):
         Returns:
             DaytonaRuntime: The current runtime.
         """
-        from daytona_sdk import
+        from daytona_sdk import CreateSandboxBaseParams

         try:
-            params =
+            params = CreateSandboxBaseParams(language=self.language)
             self.sandbox = self.daytona.create(params)
             if self.sandbox is None:
                 raise RuntimeError("Failed to create sandbox.")
@@ -83,7 +83,7 @@ class DaytonaRuntime(BaseRuntime):
         r"""Clean up the sandbox when exiting."""
         if self.sandbox:
             try:
-                self.daytona.
+                self.daytona.delete(self.sandbox)
                 logger.info(f"Sandbox {self.sandbox.id} removed")
                 self.sandbox = None
             except Exception as e:
@@ -112,7 +112,7 @@ class DaytonaRuntime(BaseRuntime):
         if arguments is not None:
             entrypoint += json.dumps(arguments, ensure_ascii=False)

-        def make_wrapper(inner_func, func_name, func_code):
+        def make_wrapper(inner_func: Callable, func_name: str, func_code: str):
            r"""Creates a wrapper for a function to execute it in the
            Daytona sandbox.

@@ -208,12 +208,11 @@ class DaytonaRuntime(BaseRuntime):
             RuntimeError: If the sandbox is not initialized.
         """
         if self.sandbox is None:
-            raise RuntimeError("
-        info = self.sandbox.info()
+            raise RuntimeError("Sandbox not initialized.")
         return (
-            f"Sandbox {
-            f"State: {
-            f"Resources: {
+            f"Sandbox {self.sandbox.id}:\n"
+            f"State: {self.sandbox.state}\n"
+            f"Resources: {self.sandbox.cpu} CPU, {self.sandbox.memory} RAM"
         )

     def __del__(self):
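
A minimal sketch of the corrected flow, assuming DaytonaRuntime is exported from camel.runtimes and that Daytona credentials are configured; build() now creates the sandbox with CreateSandboxBaseParams and raises RuntimeError if creation fails.

from camel.runtimes import DaytonaRuntime

# Assumption: DAYTONA_API_KEY is set; api_key/api_url can also be passed
# explicitly to the constructor shown in the hunks above.
runtime = DaytonaRuntime(language="python").build()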
camel/toolkits/__init__.py
CHANGED
@@ -23,7 +23,7 @@ from .open_api_specs.security_config import openapi_security_config
 from .math_toolkit import MathToolkit
 from .search_toolkit import SearchToolkit
 from .weather_toolkit import WeatherToolkit
-from .
+from .image_generation_toolkit import ImageGenToolkit, OpenAIImageToolkit
 from .ask_news_toolkit import AskNewsToolkit, AsyncAskNewsToolkit
 from .linkedin_toolkit import LinkedInToolkit
 from .reddit_toolkit import RedditToolkit
@@ -88,6 +88,7 @@ from .web_deploy_toolkit import WebDeployToolkit
 from .screenshot_toolkit import ScreenshotToolkit
 from .message_integration import ToolkitMessageIntegration
 from .notion_mcp_toolkit import NotionMCPToolkit
+from .minimax_mcp_toolkit import MinimaxMCPToolkit

 __all__ = [
     'BaseToolkit',
@@ -102,7 +103,7 @@ __all__ = [
     'SearchToolkit',
     'SlackToolkit',
     'WhatsAppToolkit',
-    '
+    'ImageGenToolkit',
     'TwitterToolkit',
     'WeatherToolkit',
     'RetrievalToolkit',
@@ -151,7 +152,7 @@ __all__ = [
     'PlaywrightMCPToolkit',
     'WolframAlphaToolkit',
     'BohriumToolkit',
-    'OpenAIImageToolkit',
+    'OpenAIImageToolkit',  # Backward compatibility
     'TaskPlanningToolkit',
     'HybridBrowserToolkit',
     'EdgeOnePagesMCPToolkit',
@@ -165,4 +166,5 @@ __all__ = [
     'RegisteredAgentToolkit',
     'ToolkitMessageIntegration',
     'NotionMCPToolkit',
+    'MinimaxMCPToolkit',
 ]
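
For downstream code, the practical effect of this change is illustrated below: ImageGenToolkit is the new exported name, OpenAIImageToolkit remains importable for backward compatibility, and MinimaxMCPToolkit is newly exported.

from camel.toolkits import ImageGenToolkit, MinimaxMCPToolkit

# Legacy name kept for backward compatibility during the transition:
from camel.toolkits import OpenAIImageToolkit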