camel-ai 0.2.75a5__py3-none-any.whl → 0.2.76a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic. Click here for more details.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +298 -130
- camel/configs/__init__.py +6 -0
- camel/configs/amd_config.py +70 -0
- camel/configs/nebius_config.py +103 -0
- camel/interpreters/__init__.py +2 -0
- camel/interpreters/microsandbox_interpreter.py +395 -0
- camel/models/__init__.py +4 -0
- camel/models/amd_model.py +101 -0
- camel/models/model_factory.py +4 -0
- camel/models/nebius_model.py +83 -0
- camel/models/ollama_model.py +3 -3
- camel/models/openai_model.py +0 -6
- camel/runtimes/daytona_runtime.py +11 -12
- camel/societies/workforce/task_channel.py +120 -27
- camel/societies/workforce/workforce.py +35 -3
- camel/toolkits/__init__.py +5 -3
- camel/toolkits/code_execution.py +28 -1
- camel/toolkits/function_tool.py +6 -1
- camel/toolkits/github_toolkit.py +104 -17
- camel/toolkits/hybrid_browser_toolkit/config_loader.py +8 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +12 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +33 -14
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +135 -40
- camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +2 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +43 -207
- camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +231 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
- camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +39 -6
- camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +248 -58
- camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +5 -1
- camel/toolkits/{openai_image_toolkit.py → image_generation_toolkit.py} +98 -31
- camel/toolkits/math_toolkit.py +64 -10
- camel/toolkits/mcp_toolkit.py +39 -14
- camel/toolkits/minimax_mcp_toolkit.py +195 -0
- camel/toolkits/search_toolkit.py +13 -2
- camel/toolkits/terminal_toolkit.py +12 -2
- camel/toolkits/video_analysis_toolkit.py +16 -10
- camel/types/enums.py +42 -0
- camel/types/unified_model_type.py +5 -0
- camel/utils/commons.py +2 -0
- camel/utils/mcp.py +136 -2
- {camel_ai-0.2.75a5.dist-info → camel_ai-0.2.76a0.dist-info}/METADATA +5 -11
- {camel_ai-0.2.75a5.dist-info → camel_ai-0.2.76a0.dist-info}/RECORD +47 -38
- {camel_ai-0.2.75a5.dist-info → camel_ai-0.2.76a0.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.75a5.dist-info → camel_ai-0.2.76a0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
2
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
3
|
+
# you may not use this file except in compliance with the License.
|
|
4
|
+
# You may obtain a copy of the License at
|
|
5
|
+
#
|
|
6
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
|
+
#
|
|
8
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
10
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
|
+
# See the License for the specific language governing permissions and
|
|
12
|
+
# limitations under the License.
|
|
13
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
14
|
+
from __future__ import annotations
|
|
15
|
+
|
|
16
|
+
from typing import List, Optional, Union
|
|
17
|
+
|
|
18
|
+
from pydantic import Field
|
|
19
|
+
|
|
20
|
+
from camel.configs.base_config import BaseConfig
|
|
21
|
+
from camel.types import NotGiven
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class AMDConfig(BaseConfig):
    r"""Configuration class for AMD API models.

    This class defines the configuration parameters for AMD's language
    models, including temperature, sampling parameters, and response format
    settings.

    Args:
        stream (bool, optional): Whether to stream the response.
            (default: :obj:`None`)
        temperature (float, optional): Controls randomness in the response.
            Higher values make output more random, lower values make it more
            deterministic. Range: [0.0, 2.0]. (default: :obj:`None`)
        top_p (float, optional): Controls diversity via nucleus sampling.
            Range: [0.0, 1.0]. (default: :obj:`None`)
        presence_penalty (float, optional): Penalizes new tokens based on
            whether they appear in the text so far. Range: [-2.0, 2.0].
            (default: :obj:`None`)
        frequency_penalty (float, optional): Penalizes new tokens based on
            their frequency in the text so far. Range: [-2.0, 2.0].
            (default: :obj:`None`)
        max_tokens (Union[int, NotGiven], optional): Maximum number of tokens
            to generate. If not provided, model will use its default maximum.
            (default: :obj:`None`)
        seed (Optional[int], optional): Random seed for deterministic sampling.
            (default: :obj:`None`)
        tools (Optional[List[Dict]], optional): List of tools available to the
            model. This includes tools such as a text editor, a calculator, or
            a search engine. Inherited from :class:`BaseConfig`.
            (default: :obj:`None`)
        tool_choice (Optional[str], optional): Tool choice configuration.
            (default: :obj:`None`)
        stop (Optional[List[str]], optional): List of stop sequences.
            (default: :obj:`None`)
    """

    stream: Optional[bool] = Field(default=None)
    temperature: Optional[float] = Field(default=None)
    top_p: Optional[float] = Field(default=None)
    presence_penalty: Optional[float] = Field(default=None)
    frequency_penalty: Optional[float] = Field(default=None)
    max_tokens: Optional[Union[int, NotGiven]] = Field(default=None)
    seed: Optional[int] = Field(default=None)
    tool_choice: Optional[str] = Field(default=None)
    stop: Optional[List[str]] = Field(default=None)


# Set of parameter names accepted by the AMD API; derived from the config
# fields so it stays in sync with the class definition.
AMD_API_PARAMS = set(AMDConfig.model_fields.keys())
|
|
@@ -0,0 +1,103 @@
|
|
|
1
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
2
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
3
|
+
# you may not use this file except in compliance with the License.
|
|
4
|
+
# You may obtain a copy of the License at
|
|
5
|
+
#
|
|
6
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
|
+
#
|
|
8
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
10
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
|
+
# See the License for the specific language governing permissions and
|
|
12
|
+
# limitations under the License.
|
|
13
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
14
|
+
from __future__ import annotations
|
|
15
|
+
|
|
16
|
+
from typing import Optional, Sequence, Union
|
|
17
|
+
|
|
18
|
+
from camel.configs.base_config import BaseConfig
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class NebiusConfig(BaseConfig):
    r"""Defines the parameters for generating chat completions using OpenAI
    compatibility with Nebius AI Studio.

    Reference: https://nebius.com/docs/ai-studio/api

    Args:
        temperature (float, optional): Sampling temperature to use, between
            :obj:`0` and :obj:`2`. Higher values make the output more random,
            while lower values make it more focused and deterministic.
            (default: :obj:`None`)
        top_p (float, optional): An alternative to sampling with temperature,
            called nucleus sampling, where the model considers the results of
            the tokens with top_p probability mass. So :obj:`0.1` means only
            the tokens comprising the top 10% probability mass are considered.
            (default: :obj:`None`)
        n (int, optional): How many chat completion choices to generate for
            each input message. (default: :obj:`None`)
        response_format (object, optional): An object specifying the format
            that the model must output. Compatible with GPT-4 Turbo and all
            GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
            {"type": "json_object"} enables JSON mode, which guarantees the
            message the model generates is valid JSON. Important: when using
            JSON mode, you must also instruct the model to produce JSON
            yourself via a system or user message. Without this, the model
            may generate an unending stream of whitespace until the generation
            reaches the token limit, resulting in a long-running and seemingly
            "stuck" request. Also note that the message content may be
            partially cut off if finish_reason="length", which indicates the
            generation exceeded max_tokens or the conversation exceeded the
            max context length.
        stream (bool, optional): If True, partial message deltas will be sent
            as data-only server-sent events as they become available.
            (default: :obj:`None`)
        stop (str or list, optional): Up to :obj:`4` sequences where the API
            will stop generating further tokens. (default: :obj:`None`)
        max_tokens (int, optional): The maximum number of tokens to generate
            in the chat completion. The total length of input tokens and
            generated tokens is limited by the model's context length.
            (default: :obj:`None`)
        presence_penalty (float, optional): Number between :obj:`-2.0` and
            :obj:`2.0`. Positive values penalize new tokens based on whether
            they appear in the text so far, increasing the model's likelihood
            to talk about new topics. See more information about frequency and
            presence penalties. (default: :obj:`None`)
        frequency_penalty (float, optional): Number between :obj:`-2.0` and
            :obj:`2.0`. Positive values penalize new tokens based on their
            existing frequency in the text so far, decreasing the model's
            likelihood to repeat the same line verbatim. See more information
            about frequency and presence penalties. (default: :obj:`None`)
        user (str, optional): A unique identifier representing your end-user,
            which can help OpenAI to monitor and detect abuse.
            (default: :obj:`None`)
        tools (list[FunctionTool], optional): A list of tools the model may
            call. Currently, only functions are supported as a tool. Use this
            to provide a list of functions the model may generate JSON inputs
            for. A max of 128 functions are supported.
        tool_choice (Union[dict[str, str], str], optional): Controls which (if
            any) tool is called by the model. :obj:`"none"` means the model
            will not call any tool and instead generates a message.
            :obj:`"auto"` means the model can pick between generating a
            message or calling one or more tools. :obj:`"required"` means the
            model must call one or more tools. Specifying a particular tool
            via {"type": "function", "function": {"name": "my_function"}}
            forces the model to call that tool. :obj:`"none"` is the default
            when no tools are present. :obj:`"auto"` is the default if tools
            are present.
    """

    temperature: Optional[float] = None
    top_p: Optional[float] = None
    n: Optional[int] = None
    stream: Optional[bool] = None
    stop: Optional[Union[str, Sequence[str]]] = None
    max_tokens: Optional[int] = None
    presence_penalty: Optional[float] = None
    response_format: Optional[dict] = None
    frequency_penalty: Optional[float] = None
    user: Optional[str] = None
    tool_choice: Optional[Union[dict[str, str], str]] = None


# Set of parameter names accepted by the Nebius API; derived from the config
# fields so it stays in sync with the class definition.
NEBIUS_API_PARAMS = set(NebiusConfig.model_fields.keys())
|
camel/interpreters/__init__.py
CHANGED
|
@@ -18,6 +18,7 @@ from .e2b_interpreter import E2BInterpreter
|
|
|
18
18
|
from .internal_python_interpreter import InternalPythonInterpreter
|
|
19
19
|
from .interpreter_error import InterpreterError
|
|
20
20
|
from .ipython_interpreter import JupyterKernelInterpreter
|
|
21
|
+
from .microsandbox_interpreter import MicrosandboxInterpreter
|
|
21
22
|
from .subprocess_interpreter import SubprocessInterpreter
|
|
22
23
|
|
|
23
24
|
__all__ = [
|
|
@@ -28,4 +29,5 @@ __all__ = [
|
|
|
28
29
|
'DockerInterpreter',
|
|
29
30
|
'JupyterKernelInterpreter',
|
|
30
31
|
'E2BInterpreter',
|
|
32
|
+
'MicrosandboxInterpreter',
|
|
31
33
|
]
|
|
@@ -0,0 +1,395 @@
|
|
|
1
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
2
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
3
|
+
# you may not use this file except in compliance with the License.
|
|
4
|
+
# You may obtain a copy of the License at
|
|
5
|
+
#
|
|
6
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
|
+
#
|
|
8
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
10
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
|
+
# See the License for the specific language governing permissions and
|
|
12
|
+
# limitations under the License.
|
|
13
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
14
|
+
import asyncio
|
|
15
|
+
from typing import Any, ClassVar, Dict, List, Optional, Tuple, Union
|
|
16
|
+
|
|
17
|
+
from camel.interpreters.base import BaseInterpreter
|
|
18
|
+
from camel.interpreters.interpreter_error import InterpreterError
|
|
19
|
+
from camel.logger import get_logger
|
|
20
|
+
|
|
21
|
+
logger = get_logger(__name__)


class MicrosandboxInterpreter(BaseInterpreter):
    r"""Microsandbox Code Interpreter implementation.

    This interpreter provides secure code execution using microsandbox,
    a self-hosted platform for secure execution of untrusted user/AI code.
    It supports Python code execution via PythonSandbox, JavaScript/Node.js
    code execution via NodeSandbox, and shell commands via the command
    interface.

    Args:
        require_confirm (bool, optional): If True, prompt user before running
            code strings for security. (default: :obj:`True`)
        server_url (str, optional): URL of the microsandbox server. If not
            provided, will use MSB_SERVER_URL environment variable, then
            fall back to http://127.0.0.1:5555. (default: :obj:`None`)
        api_key (str, optional): API key for microsandbox authentication.
            If not provided, will use MSB_API_KEY environment variable.
            (default: :obj:`None`)
        namespace (str, optional): Namespace for the sandbox.
            (default: :obj:`"default"`)
        sandbox_name (str, optional): Name of the sandbox instance. If not
            provided, a random name will be generated by the SDK.
            (default: :obj:`None`)
        timeout (int, optional): Default timeout for code execution in
            seconds. (default: :obj:`30`)

    Environment Variables:
        MSB_SERVER_URL: URL of the microsandbox server.
        MSB_API_KEY: API key for microsandbox authentication.

    Note:
        The SDK handles parameter priority as: user parameter > environment
        variable > default value.
    """

    # Maps user-facing code-type aliases to the internal execution route.
    _CODE_TYPE_MAPPING: ClassVar[Dict[str, str]] = {
        # Python code - uses PythonSandbox
        "python": "python_sandbox",
        "py3": "python_sandbox",
        "python3": "python_sandbox",
        "py": "python_sandbox",
        # JavaScript/Node.js code - uses NodeSandbox
        "javascript": "node_sandbox",
        "js": "node_sandbox",
        "node": "node_sandbox",
        "typescript": "node_sandbox",
        "ts": "node_sandbox",
        # Shell commands - uses command.run()
        "bash": "shell_command",
        "shell": "shell_command",
        "sh": "shell_command",
    }

    def __init__(
        self,
        require_confirm: bool = True,
        server_url: Optional[str] = None,
        api_key: Optional[str] = None,
        namespace: str = "default",
        sandbox_name: Optional[str] = None,
        timeout: int = 30,
    ) -> None:
        # Imported lazily so the package remains importable without the
        # optional `microsandbox` dependency installed.
        from microsandbox import (
            NodeSandbox,
            PythonSandbox,
        )

        # Store parameters, let SDK handle defaults and environment variables
        self.require_confirm = require_confirm
        self.server_url = server_url  # None means use SDK default logic
        self.api_key = api_key  # None means use SDK default logic
        self.namespace = namespace
        self.sandbox_name = (
            sandbox_name  # None means SDK generates random name
        )
        self.timeout = timeout

        # Store sandbox configuration
        self._sandbox_config = {
            "server_url": self.server_url,
            "namespace": self.namespace,
            "name": self.sandbox_name,
            "api_key": self.api_key,
        }

        # Store sandbox classes for reuse
        self._PythonSandbox = PythonSandbox
        self._NodeSandbox = NodeSandbox

        # Log initialization info
        logger.info("Initialized MicrosandboxInterpreter")
        logger.info(f"Namespace: {self.namespace}")
        if self.sandbox_name:
            logger.info(f"Sandbox name: {self.sandbox_name}")
        else:
            logger.info("Sandbox name: will be auto-generated by SDK")

    def run(
        self,
        code: str,
        code_type: str = "python",
    ) -> str:
        r"""Executes the given code in the microsandbox.

        Args:
            code (str): The code string to execute.
            code_type (str): The type of code to execute. Supported types:
                'python', 'javascript', 'bash'. (default: :obj:`python`)

        Returns:
            str: The string representation of the output of the executed code.

        Raises:
            InterpreterError: If the `code_type` is not supported or if any
                runtime error occurs during the execution of the code.
        """
        if code_type not in self._CODE_TYPE_MAPPING:
            raise InterpreterError(
                f"Unsupported code type {code_type}. "
                f"`{self.__class__.__name__}` only supports "
                f"{', '.join(list(self._CODE_TYPE_MAPPING.keys()))}."
            )

        # Print code for security checking
        if self.require_confirm:
            logger.info(
                f"The following {code_type} code will run on "
                f"microsandbox: {code}"
            )
            self._confirm_execution("code")

        # Run the code asynchronously.
        # NOTE(review): asyncio.run() raises if a loop is already running in
        # this thread; callers inside an event loop should use the async API.
        return asyncio.run(self._run_async(code, code_type))

    async def _run_async(self, code: str, code_type: str) -> str:
        r"""Asynchronously executes code in microsandbox.

        Args:
            code (str): The code to execute.
            code_type (str): The type of code to execute.

        Returns:
            str: The output of the executed code.

        Raises:
            InterpreterError: If execution fails.
        """
        try:
            execution_method = self._CODE_TYPE_MAPPING[code_type]

            if execution_method == "python_sandbox":
                return await self._run_python_code(code)
            elif execution_method == "node_sandbox":
                return await self._run_node_code(code)
            elif execution_method == "shell_command":
                return await self._run_shell_command(code)
            else:
                raise InterpreterError(
                    f"Unsupported execution method: {execution_method}"
                )

        except InterpreterError:
            # Don't re-wrap errors we raised ourselves (e.g. the unsupported
            # execution method branch above).
            raise
        except Exception as e:
            # Chain the cause so the original traceback is preserved.
            raise InterpreterError(
                f"Error executing code in microsandbox: {e}"
            ) from e

    async def _run_python_code(self, code: str) -> str:
        r"""Execute Python code using PythonSandbox.

        Args:
            code (str): Python code to execute.

        Returns:
            str: Execution output.
        """
        async with self._PythonSandbox.create(
            **self._sandbox_config
        ) as sandbox:
            execution = await asyncio.wait_for(
                sandbox.run(code), timeout=self.timeout
            )
            return await self._get_execution_output(execution)

    async def _run_node_code(self, code: str) -> str:
        r"""Execute JavaScript/Node.js code using NodeSandbox.

        Args:
            code (str): JavaScript/Node.js code to execute.

        Returns:
            str: Execution output.
        """
        async with self._NodeSandbox.create(**self._sandbox_config) as sandbox:
            execution = await asyncio.wait_for(
                sandbox.run(code), timeout=self.timeout
            )
            return await self._get_execution_output(execution)

    async def _run_shell_command(self, code: str) -> str:
        r"""Execute shell commands directly.

        Args:
            code (str): Shell command to execute.

        Returns:
            str: Command execution output.
        """
        # Use any sandbox for shell commands
        async with self._PythonSandbox.create(
            **self._sandbox_config
        ) as sandbox:
            execution = await asyncio.wait_for(
                sandbox.command.run("bash", ["-c", code]), timeout=self.timeout
            )
            return await self._get_command_output(execution)

    @staticmethod
    def _format_result(
        output: Optional[str],
        error: Optional[str],
        no_output_message: str,
        exit_code: Optional[int] = None,
    ) -> str:
        r"""Format stdout/stderr (and optional exit code) into one string.

        Args:
            output (Optional[str]): Captured stdout.
            error (Optional[str]): Captured stderr.
            no_output_message (str): Message returned when nothing was
                produced.
            exit_code (Optional[int]): Process exit code, if available.

        Returns:
            str: Formatted output.
        """
        result_parts = []
        if output and output.strip():
            result_parts.append(output.strip())
        if error and error.strip():
            result_parts.append(f"STDERR: {error.strip()}")
        # Only report a non-zero, known exit code.
        if exit_code is not None and exit_code != 0:
            result_parts.append(f"Exit code: {exit_code}")

        return "\n".join(result_parts) if result_parts else no_output_message

    async def _get_execution_output(self, execution) -> str:
        r"""Get output from code execution.

        Args:
            execution: Execution object from sandbox.run().

        Returns:
            str: Formatted execution output.
        """
        output = await execution.output()
        error = await execution.error()
        return self._format_result(
            output, error, "Code executed successfully (no output)"
        )

    async def _get_command_output(self, execution) -> str:
        r"""Get output from command execution.

        Args:
            execution: CommandExecution object from sandbox.command.run().

        Returns:
            str: Formatted command output.
        """
        output = await execution.output()
        error = await execution.error()
        exit_code = getattr(execution, 'exit_code', None)
        return self._format_result(
            output,
            error,
            "Command executed successfully (no output)",
            exit_code=exit_code,
        )

    def _confirm_execution(self, execution_type: str) -> None:
        r"""Prompt user for confirmation before executing code or commands.

        Args:
            execution_type (str): Type of execution ('code' or 'command').

        Raises:
            InterpreterError: If user declines to run the code/command.
        """
        while True:
            choice = input(f"Running {execution_type}? [Y/n]:").lower()
            if choice in ["y", "yes", "ye"]:
                break
            elif choice not in ["no", "n"]:
                # Unrecognized answer: ask again.
                continue
            raise InterpreterError(
                f"Execution halted: User opted not to run the "
                f"{execution_type}. "
                f"This choice stops the current operation and any "
                f"further {execution_type} execution."
            )

    def supported_code_types(self) -> List[str]:
        r"""Provides supported code types by the interpreter."""
        return list(self._CODE_TYPE_MAPPING.keys())

    def update_action_space(self, action_space: Dict[str, Any]) -> None:
        r"""Updates action space for interpreter.

        Args:
            action_space: Action space dictionary (unused in microsandbox).

        Note:
            Microsandbox doesn't support action space updates as it runs
            in isolated environments for each execution.
        """
        # Explicitly acknowledge the parameter to avoid linting warnings
        _ = action_space
        logger.warning(
            "Microsandbox doesn't support action space updates. "
            "Code runs in isolated environments for each execution."
        )

    def execute_command(self, command: str) -> Union[str, Tuple[str, str]]:
        r"""Execute a shell command in the microsandbox.

        This method is designed for package management and system
        administration tasks. It executes shell commands directly
        using the microsandbox command interface.

        Args:
            command (str): The shell command to execute (e.g.,
                "pip install numpy", "ls -la", "apt-get update").

        Returns:
            Union[str, Tuple[str, str]]: The output of the command.

        Examples:
            >>> interpreter.execute_command("pip install numpy")
            >>> interpreter.execute_command("npm install express")
            >>> interpreter.execute_command("ls -la /tmp")
        """
        # Print command for security checking
        if self.require_confirm:
            logger.info(
                f"The following shell command will run on "
                f"microsandbox: {command}"
            )
            self._confirm_execution("command")

        return asyncio.run(self._execute_command_async(command))

    async def _execute_command_async(self, command: str) -> str:
        r"""Asynchronously executes a shell command in microsandbox.

        Args:
            command (str): The shell command to execute.

        Returns:
            str: The output of the command execution.

        Raises:
            InterpreterError: If execution fails.
        """
        try:
            async with self._PythonSandbox.create(
                **self._sandbox_config
            ) as sandbox:
                execution = await asyncio.wait_for(
                    sandbox.command.run("bash", ["-c", command]),
                    timeout=self.timeout,
                )
                return await self._get_command_output(execution)

        except Exception as e:
            # Chain the cause so the original traceback is preserved.
            raise InterpreterError(
                f"Error executing command in microsandbox: {e}"
            ) from e

    def __del__(self) -> None:
        r"""Destructor for the MicrosandboxInterpreter class.

        Microsandbox uses context managers for resource management,
        so no explicit cleanup is needed.
        """
        logger.debug("MicrosandboxInterpreter cleaned up")
|
camel/models/__init__.py
CHANGED
|
@@ -12,6 +12,7 @@
|
|
|
12
12
|
# limitations under the License.
|
|
13
13
|
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
14
14
|
from .aiml_model import AIMLModel
|
|
15
|
+
from .amd_model import AMDModel
|
|
15
16
|
from .anthropic_model import AnthropicModel
|
|
16
17
|
from .aws_bedrock_model import AWSBedrockModel
|
|
17
18
|
from .azure_openai_model import AzureOpenAIModel
|
|
@@ -31,6 +32,7 @@ from .model_factory import ModelFactory
|
|
|
31
32
|
from .model_manager import ModelManager, ModelProcessingError
|
|
32
33
|
from .modelscope_model import ModelScopeModel
|
|
33
34
|
from .moonshot_model import MoonshotModel
|
|
35
|
+
from .nebius_model import NebiusModel
|
|
34
36
|
from .nemotron_model import NemotronModel
|
|
35
37
|
from .netmind_model import NetmindModel
|
|
36
38
|
from .novita_model import NovitaModel
|
|
@@ -61,6 +63,7 @@ __all__ = [
|
|
|
61
63
|
'OpenRouterModel',
|
|
62
64
|
'AzureOpenAIModel',
|
|
63
65
|
'AnthropicModel',
|
|
66
|
+
'AMDModel',
|
|
64
67
|
'MistralModel',
|
|
65
68
|
'GroqModel',
|
|
66
69
|
'StubModel',
|
|
@@ -87,6 +90,7 @@ __all__ = [
|
|
|
87
90
|
'QwenModel',
|
|
88
91
|
'AWSBedrockModel',
|
|
89
92
|
'ModelProcessingError',
|
|
93
|
+
'NebiusModel',
|
|
90
94
|
'DeepSeekModel',
|
|
91
95
|
'FishAudioModel',
|
|
92
96
|
'InternLMModel',
|