camel-ai 0.2.73a4__py3-none-any.whl → 0.2.80a2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- camel/__init__.py +1 -1
- camel/agents/_utils.py +38 -0
- camel/agents/chat_agent.py +2217 -519
- camel/agents/mcp_agent.py +30 -27
- camel/configs/__init__.py +15 -0
- camel/configs/aihubmix_config.py +88 -0
- camel/configs/amd_config.py +70 -0
- camel/configs/cometapi_config.py +104 -0
- camel/configs/minimax_config.py +93 -0
- camel/configs/nebius_config.py +103 -0
- camel/data_collectors/alpaca_collector.py +15 -6
- camel/datasets/base_generator.py +39 -10
- camel/environments/single_step.py +28 -3
- camel/environments/tic_tac_toe.py +1 -1
- camel/interpreters/__init__.py +2 -0
- camel/interpreters/docker/Dockerfile +3 -12
- camel/interpreters/e2b_interpreter.py +34 -1
- camel/interpreters/microsandbox_interpreter.py +395 -0
- camel/loaders/__init__.py +11 -2
- camel/loaders/chunkr_reader.py +9 -0
- camel/memories/agent_memories.py +48 -4
- camel/memories/base.py +26 -0
- camel/memories/blocks/chat_history_block.py +122 -4
- camel/memories/context_creators/score_based.py +25 -384
- camel/memories/records.py +88 -8
- camel/messages/base.py +153 -34
- camel/models/__init__.py +10 -0
- camel/models/aihubmix_model.py +83 -0
- camel/models/aiml_model.py +1 -16
- camel/models/amd_model.py +101 -0
- camel/models/anthropic_model.py +6 -19
- camel/models/aws_bedrock_model.py +2 -33
- camel/models/azure_openai_model.py +114 -89
- camel/models/base_audio_model.py +3 -1
- camel/models/base_model.py +32 -14
- camel/models/cohere_model.py +1 -16
- camel/models/cometapi_model.py +83 -0
- camel/models/crynux_model.py +1 -16
- camel/models/deepseek_model.py +1 -16
- camel/models/fish_audio_model.py +6 -0
- camel/models/gemini_model.py +36 -18
- camel/models/groq_model.py +1 -17
- camel/models/internlm_model.py +1 -16
- camel/models/litellm_model.py +1 -16
- camel/models/lmstudio_model.py +1 -17
- camel/models/minimax_model.py +83 -0
- camel/models/mistral_model.py +1 -16
- camel/models/model_factory.py +27 -1
- camel/models/modelscope_model.py +1 -16
- camel/models/moonshot_model.py +105 -24
- camel/models/nebius_model.py +83 -0
- camel/models/nemotron_model.py +0 -5
- camel/models/netmind_model.py +1 -16
- camel/models/novita_model.py +1 -16
- camel/models/nvidia_model.py +1 -16
- camel/models/ollama_model.py +4 -19
- camel/models/openai_compatible_model.py +62 -41
- camel/models/openai_model.py +62 -57
- camel/models/openrouter_model.py +1 -17
- camel/models/ppio_model.py +1 -16
- camel/models/qianfan_model.py +1 -16
- camel/models/qwen_model.py +1 -16
- camel/models/reka_model.py +1 -16
- camel/models/samba_model.py +34 -47
- camel/models/sglang_model.py +64 -31
- camel/models/siliconflow_model.py +1 -16
- camel/models/stub_model.py +0 -4
- camel/models/togetherai_model.py +1 -16
- camel/models/vllm_model.py +1 -16
- camel/models/volcano_model.py +0 -17
- camel/models/watsonx_model.py +1 -16
- camel/models/yi_model.py +1 -16
- camel/models/zhipuai_model.py +60 -16
- camel/parsers/__init__.py +18 -0
- camel/parsers/mcp_tool_call_parser.py +176 -0
- camel/retrievers/auto_retriever.py +1 -0
- camel/runtimes/daytona_runtime.py +11 -12
- camel/societies/__init__.py +2 -0
- camel/societies/workforce/__init__.py +2 -0
- camel/societies/workforce/events.py +122 -0
- camel/societies/workforce/prompts.py +146 -66
- camel/societies/workforce/role_playing_worker.py +15 -11
- camel/societies/workforce/single_agent_worker.py +302 -65
- camel/societies/workforce/structured_output_handler.py +30 -18
- camel/societies/workforce/task_channel.py +163 -27
- camel/societies/workforce/utils.py +107 -13
- camel/societies/workforce/workflow_memory_manager.py +772 -0
- camel/societies/workforce/workforce.py +1949 -579
- camel/societies/workforce/workforce_callback.py +74 -0
- camel/societies/workforce/workforce_logger.py +168 -145
- camel/societies/workforce/workforce_metrics.py +33 -0
- camel/storages/key_value_storages/json.py +15 -2
- camel/storages/key_value_storages/mem0_cloud.py +48 -47
- camel/storages/object_storages/google_cloud.py +1 -1
- camel/storages/vectordb_storages/oceanbase.py +13 -13
- camel/storages/vectordb_storages/qdrant.py +3 -3
- camel/storages/vectordb_storages/tidb.py +8 -6
- camel/tasks/task.py +4 -3
- camel/toolkits/__init__.py +20 -7
- camel/toolkits/aci_toolkit.py +45 -0
- camel/toolkits/base.py +6 -4
- camel/toolkits/code_execution.py +28 -1
- camel/toolkits/context_summarizer_toolkit.py +684 -0
- camel/toolkits/dappier_toolkit.py +5 -1
- camel/toolkits/dingtalk.py +1135 -0
- camel/toolkits/edgeone_pages_mcp_toolkit.py +11 -31
- camel/toolkits/excel_toolkit.py +1 -1
- camel/toolkits/{file_write_toolkit.py → file_toolkit.py} +430 -36
- camel/toolkits/function_tool.py +13 -3
- camel/toolkits/github_toolkit.py +104 -17
- camel/toolkits/gmail_toolkit.py +1839 -0
- camel/toolkits/google_calendar_toolkit.py +38 -4
- camel/toolkits/google_drive_mcp_toolkit.py +12 -31
- camel/toolkits/hybrid_browser_toolkit/config_loader.py +15 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +77 -8
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +884 -88
- camel/toolkits/hybrid_browser_toolkit/installer.py +203 -0
- camel/toolkits/hybrid_browser_toolkit/ts/package-lock.json +5 -612
- camel/toolkits/hybrid_browser_toolkit/ts/package.json +0 -1
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +959 -89
- camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +9 -2
- camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +281 -213
- camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +219 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/types.ts +23 -3
- camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +72 -7
- camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +582 -132
- camel/toolkits/hybrid_browser_toolkit_py/actions.py +158 -0
- camel/toolkits/hybrid_browser_toolkit_py/browser_session.py +55 -8
- camel/toolkits/hybrid_browser_toolkit_py/config_loader.py +43 -0
- camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +321 -8
- camel/toolkits/hybrid_browser_toolkit_py/snapshot.py +10 -4
- camel/toolkits/hybrid_browser_toolkit_py/unified_analyzer.js +45 -4
- camel/toolkits/{openai_image_toolkit.py → image_generation_toolkit.py} +151 -53
- camel/toolkits/klavis_toolkit.py +5 -1
- camel/toolkits/markitdown_toolkit.py +27 -1
- camel/toolkits/math_toolkit.py +64 -10
- camel/toolkits/mcp_toolkit.py +366 -71
- camel/toolkits/memory_toolkit.py +5 -1
- camel/toolkits/message_integration.py +18 -13
- camel/toolkits/minimax_mcp_toolkit.py +195 -0
- camel/toolkits/note_taking_toolkit.py +19 -10
- camel/toolkits/notion_mcp_toolkit.py +16 -26
- camel/toolkits/openbb_toolkit.py +5 -1
- camel/toolkits/origene_mcp_toolkit.py +8 -49
- camel/toolkits/playwright_mcp_toolkit.py +12 -31
- camel/toolkits/resend_toolkit.py +168 -0
- camel/toolkits/search_toolkit.py +264 -91
- camel/toolkits/slack_toolkit.py +64 -10
- camel/toolkits/terminal_toolkit/__init__.py +18 -0
- camel/toolkits/terminal_toolkit/terminal_toolkit.py +957 -0
- camel/toolkits/terminal_toolkit/utils.py +532 -0
- camel/toolkits/vertex_ai_veo_toolkit.py +590 -0
- camel/toolkits/video_analysis_toolkit.py +17 -11
- camel/toolkits/wechat_official_toolkit.py +483 -0
- camel/toolkits/zapier_toolkit.py +5 -1
- camel/types/__init__.py +2 -2
- camel/types/enums.py +274 -7
- camel/types/openai_types.py +2 -2
- camel/types/unified_model_type.py +15 -0
- camel/utils/commons.py +36 -5
- camel/utils/constants.py +3 -0
- camel/utils/context_utils.py +1003 -0
- camel/utils/mcp.py +138 -4
- camel/utils/token_counting.py +43 -20
- {camel_ai-0.2.73a4.dist-info → camel_ai-0.2.80a2.dist-info}/METADATA +223 -83
- {camel_ai-0.2.73a4.dist-info → camel_ai-0.2.80a2.dist-info}/RECORD +170 -141
- camel/loaders/pandas_reader.py +0 -368
- camel/toolkits/openai_agent_toolkit.py +0 -135
- camel/toolkits/terminal_toolkit.py +0 -1550
- {camel_ai-0.2.73a4.dist-info → camel_ai-0.2.80a2.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.73a4.dist-info → camel_ai-0.2.80a2.dist-info}/licenses/LICENSE +0 -0
camel/datasets/base_generator.py
CHANGED
@@ -62,6 +62,7 @@ class BaseGenerator(abc.ABC, IterableDataset):
         self._buffer = buffer
         self._data: List[DataPoint] = []
         self._batch_to_save: List[DataPoint] = []
+        self._iter_position: int = 0

         if data_path:
             file_path = Path(data_path)
@@ -103,9 +104,9 @@ class BaseGenerator(abc.ABC, IterableDataset):
         r"""Async iterator that yields datapoints dynamically.

         If a `data_path` was provided during initialization, those datapoints
-        are yielded first. When self.
-        are generated. Every 100 yields, the batch is appended
-        JSONL file or discarded if `cache` is None.
+        are yielded first. When self._iter_position reaches the end of _data,
+        new datapoints are generated. Every 100 yields, the batch is appended
+        to the JSONL file or discarded if `cache` is None.

         Yields:
             DataPoint: A single datapoint.
@@ -113,9 +114,10 @@ class BaseGenerator(abc.ABC, IterableDataset):

         async def generator():
             while True:
-                if
+                if self._iter_position >= len(self._data):
                     await self.generate_new(self._buffer)
-                datapoint = self._data.
+                datapoint = self._data[self._iter_position]
+                self._iter_position += 1
                 yield datapoint
                 self._batch_to_save.append(datapoint)
                 if len(self._batch_to_save) == 100:
@@ -132,9 +134,9 @@ class BaseGenerator(abc.ABC, IterableDataset):
         r"""Synchronous iterator for PyTorch IterableDataset compatibility.

         If a `data_path` was provided during initialization, those datapoints
-        are yielded first. When self.
-        are generated. Every 100 yields, the batch is appended
-        JSONL file or discarded if `cache` is None.
+        are yielded first. When self._iter_position reaches the end of _data,
+        new datapoints are generated. Every 100 yields, the batch is appended
+        to the JSONL file or discarded if `cache` is None.

         Yields:
             DataPoint: A single datapoint.
@@ -150,9 +152,10 @@ class BaseGenerator(abc.ABC, IterableDataset):
                 raise

         while True:
-            if
+            if self._iter_position >= len(self._data):
                 asyncio.run(self.generate_new(self._buffer))
-            datapoint = self._data.
+            datapoint = self._data[self._iter_position]
+            self._iter_position += 1
             yield datapoint
             self._batch_to_save.append(datapoint)
             if len(self._batch_to_save) == 100:
@@ -248,6 +251,7 @@ class BaseGenerator(abc.ABC, IterableDataset):

         self.save_to_jsonl(file_path)
         self._data = []
+        self._iter_position = 0
         logger.info(f"Data flushed to {file_path} and cleared from the memory")

     def _init_from_jsonl(self, file_path: Path) -> List[Dict[str, Any]]:
@@ -290,3 +294,28 @@ class BaseGenerator(abc.ABC, IterableDataset):
             f"Successfully loaded {len(raw_data)} items from {file_path}"
         )
         return raw_data
+
+    def __getitem__(self, index: int) -> DataPoint:
+        r"""Get a datapoint by index without removing the datapoint from _data.
+
+        Args:
+            index (int): Index of the datapoint to retrieve.
+
+        Returns:
+            DataPoint: The datapoint at the specified index.
+
+        Raises:
+            IndexError: If the index is out of range.
+        """
+        if index < 0 or index >= len(self._data):
+            raise IndexError(f"Index {index} is out of range")
+
+        return self._data[index]
+
+    def __len__(self) -> int:
+        r"""Get the number of datapoints in the dataset.
+
+        Returns:
+            int: The number of datapoints.
+        """
+        return len(self._data)
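The new `_iter_position` counter plus `__getitem__` and `__len__` make iteration non-destructive: datapoints now stay in `_data` while iteration advances a cursor, which appears to replace the previous destructive consumption of the buffer. A minimal usage sketch, assuming `gen` is an instance of some concrete `BaseGenerator` subclass that has already loaded or generated a few datapoints:

from itertools import islice

print(len(gen))            # size of the in-memory _data buffer
first = gen[0]             # random access; the datapoint is not removed
batch = list(islice(iter(gen), 4))  # iteration moves _iter_position forward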
camel/environments/single_step.py
CHANGED
@@ -218,9 +218,34 @@ class SingleStepEnv:
             return observations[0] if batch_size == 1 else observations

         elif isinstance(self.dataset, BaseGenerator):
-
-
-
+            # Generate more data if needed
+            if batch_size > len(self.dataset):
+                new_datapoints_needed = batch_size - len(self.dataset)
+                await self.dataset.generate_new(n=new_datapoints_needed)
+
+            # Verify that enough data was generated
+            if len(self.dataset) < batch_size:
+                raise RuntimeError(
+                    f"Failed to generate enough datapoints. "
+                    f"Requested {batch_size}, but only "
+                    f"{len(self.dataset)} available after generation."
+                )
+
+            # Choose sampling strategy based on whether seed is provided
+            if seed is not None:
+                # Deterministic random sampling when seed is provided
+                random_indices = rng.sample(
+                    range(len(self.dataset)), batch_size
+                )
+                self._states = [self.dataset[ind] for ind in random_indices]
+            else:
+                # Sequential sampling when no seed (backward compatible)
+                # Use async_sample to maintain sequential behavior
+                self._states = [
+                    await self.dataset.async_sample()
+                    for _ in range(batch_size)
+                ]
+
             self.current_batch_size = batch_size
             self._states_done = [False] * batch_size

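The `BaseGenerator` branch of `SingleStepEnv` now tops up the generator when the requested batch is larger than the data on hand, then either samples indices deterministically (seeded) or falls back to the old sequential `async_sample` path. A standalone sketch of the seeded index selection; the `rng` in the diff is assumed to be a `random.Random` built from the caller's seed:

import random

dataset_len, batch_size, seed = 100, 4, 42
rng = random.Random(seed)
indices = rng.sample(range(dataset_len), batch_size)
# Rebuilding the Random with the same seed reproduces the same batch.
assert random.Random(seed).sample(range(dataset_len), batch_size) == indices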
camel/environments/tic_tac_toe.py
CHANGED
@@ -483,7 +483,7 @@ class TicTacToeEnv(MultiStepEnv):
         # Check all win combinations.
         for a, b, c in TicTacToeEnv.WIN_COMBINATIONS:
             if board[a] != " " and board[a] == board[b] == board[c]:
-                return board[a]
+                return board[a]  # type: ignore[return-value]
         # Check for draw.
         if all(cell != " " for cell in board):
             return "draw"
camel/interpreters/__init__.py
CHANGED
@@ -18,6 +18,7 @@ from .e2b_interpreter import E2BInterpreter
 from .internal_python_interpreter import InternalPythonInterpreter
 from .interpreter_error import InterpreterError
 from .ipython_interpreter import JupyterKernelInterpreter
+from .microsandbox_interpreter import MicrosandboxInterpreter
 from .subprocess_interpreter import SubprocessInterpreter

 __all__ = [
@@ -28,4 +29,5 @@ __all__ = [
     'DockerInterpreter',
     'JupyterKernelInterpreter',
     'E2BInterpreter',
+    'MicrosandboxInterpreter',
 ]
camel/interpreters/docker/Dockerfile
CHANGED
@@ -1,11 +1,8 @@
 # syntax=docker/dockerfile:1
-
 FROM ubuntu:22.04

-# Set environment variable to avoid interactive prompts
 ENV DEBIAN_FRONTEND=noninteractive

-# Update and install base utilities
 RUN apt-get update && apt-get install -y \
     build-essential \
     software-properties-common \
@@ -20,7 +17,6 @@ RUN apt-get update && apt-get install -y \
     && apt-get clean \
     && apt-get autoremove -y

-# Install Python 3.10 and its dependencies
 RUN add-apt-repository ppa:deadsnakes/ppa && \
     apt-get update && \
     apt-get install -y \
@@ -34,32 +30,27 @@ RUN add-apt-repository ppa:deadsnakes/ppa && \
     && apt-get clean \
     && apt-get autoremove -y

-# Install R
 RUN apt-get update && \
     apt-get install -y r-base && \
     rm -rf /var/lib/apt/lists/* && \
     apt-get clean && \
     apt-get autoremove -y

-# Install NodeJS 22.x
 RUN curl -fsSL https://deb.nodesource.com/setup_22.x | bash - && \
     apt-get install -y nodejs && \
     rm -rf /var/lib/apt/lists/* && \
     apt-get clean && \
     apt-get autoremove -y

-# Install Poetry
 RUN curl -fsSL https://install.python-poetry.org | python3.10 - && \
     ln -s ~/.local/bin/poetry /usr/local/bin/poetry

-# Upgrade pip and install base Python packages
 RUN python3.10 -m pip install --upgrade pip setuptools wheel
-
-# Install uv using pip instead of the shell script
 RUN pip install uv

-
+RUN groupadd -r devuser && useradd -r -m -g devuser devuser
 WORKDIR /workspace
+RUN chown -R devuser:devuser /workspace
+USER devuser

-# Set default shell
 CMD ["/bin/bash"]
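Beyond dropping the explanatory comments, the rebuilt image now runs as a non-root `devuser` that owns `/workspace`. A quick smoke test, assuming the image is built locally under a hypothetical tag:

docker build -t camel-interpreter camel/interpreters/docker
docker run --rm camel-interpreter whoami              # expected: devuser
docker run --rm camel-interpreter ls -ld /workspace   # owned by devuser:devuser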
camel/interpreters/e2b_interpreter.py
CHANGED
@@ -28,6 +28,11 @@ class E2BInterpreter(BaseInterpreter):
     Args:
         require_confirm (bool, optional): If True, prompt user before running
            code strings for security. (default: :obj:`True`)
+
+    Environment Variables:
+        E2B_API_KEY: The API key for authenticating with the E2B service.
+        E2B_DOMAIN: The base URL for the E2B API. If not provided,
+            will use the default E2B endpoint.
     """

     _CODE_TYPE_MAPPING: ClassVar[Dict[str, Optional[str]]] = {
@@ -55,7 +60,35 @@ class E2BInterpreter(BaseInterpreter):
         from e2b_code_interpreter import Sandbox

         self.require_confirm = require_confirm
-
+
+        # Get API key from environment variable
+        api_key = os.environ.get("E2B_API_KEY")
+
+        # Get domain from environment variable
+        domain = os.environ.get("E2B_DOMAIN")
+
+        # Create sandbox with appropriate parameters
+        sandbox_kwargs = {"api_key": api_key}
+
+        # Only add domain if it's provided
+        # (to maintain compatibility with standard E2B)
+        if domain:
+            sandbox_kwargs["domain"] = domain
+            logger.info(f"Using custom E2B endpoint: {domain}")
+
+        try:
+            self._sandbox = Sandbox(**sandbox_kwargs)
+        except TypeError as e:
+            if domain and "domain" in str(e):
+                logger.warning(
+                    f"The e2b_code_interpreter library doesn't support "
+                    f"custom domain. "
+                    f"Using default E2B endpoint. Error: {e}"
+                )
+                # Fallback to default configuration without domain
+                self._sandbox = Sandbox(api_key=api_key)
+            else:
+                raise e

     def __del__(self) -> None:
         r"""Destructor for the E2BInterpreter class.
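`E2BInterpreter.__init__` now reads `E2B_API_KEY` and the optional `E2B_DOMAIN` from the environment and falls back to the default endpoint when the installed `e2b_code_interpreter` version does not accept a `domain` argument. A minimal usage sketch, assuming a valid key is available (the key value and domain below are placeholders):

import os
from camel.interpreters import E2BInterpreter

os.environ["E2B_API_KEY"] = "e2b_..."       # placeholder key
# os.environ["E2B_DOMAIN"] = "https://..."  # optional self-hosted endpoint

interpreter = E2BInterpreter(require_confirm=False)
print(interpreter.run("print(40 + 2)", "python"))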
camel/interpreters/microsandbox_interpreter.py
ADDED
@@ -0,0 +1,395 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import asyncio
+from typing import Any, ClassVar, Dict, List, Optional, Tuple, Union
+
+from camel.interpreters.base import BaseInterpreter
+from camel.interpreters.interpreter_error import InterpreterError
+from camel.logger import get_logger
+
+logger = get_logger(__name__)
+
+
+class MicrosandboxInterpreter(BaseInterpreter):
+    r"""Microsandbox Code Interpreter implementation.
+
+    This interpreter provides secure code execution using microsandbox,
+    a self-hosted platform for secure execution of untrusted user/AI code.
+    It supports Python code execution via PythonSandbox, JavaScript/Node.js
+    code execution via NodeSandbox, and shell commands via the command
+    interface.
+
+    Args:
+        require_confirm (bool, optional): If True, prompt user before running
+            code strings for security. (default: :obj:`True`)
+        server_url (str, optional): URL of the microsandbox server. If not
+            provided, will use MSB_SERVER_URL environment variable, then
+            fall back to http://127.0.0.1:5555. (default: :obj:`None`)
+        api_key (str, optional): API key for microsandbox authentication.
+            If not provided, will use MSB_API_KEY environment variable.
+            (default: :obj:`None`)
+        namespace (str, optional): Namespace for the sandbox.
+            (default: :obj:`"default"`)
+        sandbox_name (str, optional): Name of the sandbox instance. If not
+            provided, a random name will be generated by the SDK.
+            (default: :obj:`None`)
+        timeout (int, optional): Default timeout for code execution in seconds.
+            (default: :obj:`30`)
+
+    Environment Variables:
+        MSB_SERVER_URL: URL of the microsandbox server.
+        MSB_API_KEY: API key for microsandbox authentication.
+
+    Note:
+        The SDK handles parameter priority as: user parameter > environment
+        variable > default value.
+    """
+
+    _CODE_TYPE_MAPPING: ClassVar[Dict[str, str]] = {
+        # Python code - uses PythonSandbox
+        "python": "python_sandbox",
+        "py3": "python_sandbox",
+        "python3": "python_sandbox",
+        "py": "python_sandbox",
+        # JavaScript/Node.js code - uses NodeSandbox
+        "javascript": "node_sandbox",
+        "js": "node_sandbox",
+        "node": "node_sandbox",
+        "typescript": "node_sandbox",
+        "ts": "node_sandbox",
+        # Shell commands - uses command.run()
+        "bash": "shell_command",
+        "shell": "shell_command",
+        "sh": "shell_command",
+    }
+
+    def __init__(
+        self,
+        require_confirm: bool = True,
+        server_url: Optional[str] = None,
+        api_key: Optional[str] = None,
+        namespace: str = "default",
+        sandbox_name: Optional[str] = None,
+        timeout: int = 30,
+    ) -> None:
+        from microsandbox import (
+            NodeSandbox,
+            PythonSandbox,
+        )
+
+        # Store parameters, let SDK handle defaults and environment variables
+        self.require_confirm = require_confirm
+        self.server_url = server_url  # None means use SDK default logic
+        self.api_key = api_key  # None means use SDK default logic
+        self.namespace = namespace
+        self.sandbox_name = (
+            sandbox_name  # None means SDK generates random name
+        )
+        self.timeout = timeout
+
+        # Store sandbox configuration
+        self._sandbox_config = {
+            "server_url": self.server_url,
+            "namespace": self.namespace,
+            "name": self.sandbox_name,
+            "api_key": self.api_key,
+        }
+
+        # Store sandbox classes for reuse
+        self._PythonSandbox = PythonSandbox
+        self._NodeSandbox = NodeSandbox
+
+        # Log initialization info
+        logger.info("Initialized MicrosandboxInterpreter")
+        logger.info(f"Namespace: {self.namespace}")
+        if self.sandbox_name:
+            logger.info(f"Sandbox name: {self.sandbox_name}")
+        else:
+            logger.info("Sandbox name: will be auto-generated by SDK")
+
+    def run(
+        self,
+        code: str,
+        code_type: str = "python",
+    ) -> str:
+        r"""Executes the given code in the microsandbox.
+
+        Args:
+            code (str): The code string to execute.
+            code_type (str): The type of code to execute. Supported types:
+                'python', 'javascript', 'bash'. (default: :obj:`python`)
+
+        Returns:
+            str: The string representation of the output of the executed code.
+
+        Raises:
+            InterpreterError: If the `code_type` is not supported or if any
+                runtime error occurs during the execution of the code.
+        """
+        if code_type not in self._CODE_TYPE_MAPPING:
+            raise InterpreterError(
+                f"Unsupported code type {code_type}. "
+                f"`{self.__class__.__name__}` only supports "
+                f"{', '.join(list(self._CODE_TYPE_MAPPING.keys()))}."
+            )
+
+        # Print code for security checking
+        if self.require_confirm:
+            logger.info(
+                f"The following {code_type} code will run on "
+                f"microsandbox: {code}"
+            )
+            self._confirm_execution("code")
+
+        # Run the code asynchronously
+        return asyncio.run(self._run_async(code, code_type))
+
+    async def _run_async(self, code: str, code_type: str) -> str:
+        r"""Asynchronously executes code in microsandbox.
+
+        Args:
+            code (str): The code to execute.
+            code_type (str): The type of code to execute.
+
+        Returns:
+            str: The output of the executed code.
+
+        Raises:
+            InterpreterError: If execution fails.
+        """
+        try:
+            execution_method = self._CODE_TYPE_MAPPING[code_type]
+
+            if execution_method == "python_sandbox":
+                return await self._run_python_code(code)
+            elif execution_method == "node_sandbox":
+                return await self._run_node_code(code)
+            elif execution_method == "shell_command":
+                return await self._run_shell_command(code)
+            else:
+                raise InterpreterError(
+                    f"Unsupported execution method: {execution_method}"
+                )
+
+        except Exception as e:
+            raise InterpreterError(
+                f"Error executing code in microsandbox: {e}"
+            )
+
+    async def _run_python_code(self, code: str) -> str:
+        r"""Execute Python code using PythonSandbox.
+
+        Args:
+            code (str): Python code to execute.
+
+        Returns:
+            str: Execution output.
+        """
+        async with self._PythonSandbox.create(
+            **self._sandbox_config
+        ) as sandbox:
+            execution = await asyncio.wait_for(
+                sandbox.run(code), timeout=self.timeout
+            )
+            return await self._get_execution_output(execution)
+
+    async def _run_node_code(self, code: str) -> str:
+        r"""Execute JavaScript/Node.js code using NodeSandbox.
+
+        Args:
+            code (str): JavaScript/Node.js code to execute.
+
+        Returns:
+            str: Execution output.
+        """
+        async with self._NodeSandbox.create(**self._sandbox_config) as sandbox:
+            execution = await asyncio.wait_for(
+                sandbox.run(code), timeout=self.timeout
+            )
+            return await self._get_execution_output(execution)
+
+    async def _run_shell_command(self, code: str) -> str:
+        r"""Execute shell commands directly.
+
+        Args:
+            code (str): Shell command to execute.
+
+        Returns:
+            str: Command execution output.
+        """
+        # Use any sandbox for shell commands
+        async with self._PythonSandbox.create(
+            **self._sandbox_config
+        ) as sandbox:
+            execution = await asyncio.wait_for(
+                sandbox.command.run("bash", ["-c", code]), timeout=self.timeout
+            )
+            return await self._get_command_output(execution)
+
+    async def _get_execution_output(self, execution) -> str:
+        r"""Get output from code execution.
+
+        Args:
+            execution: Execution object from sandbox.run().
+
+        Returns:
+            str: Formatted execution output.
+        """
+        output = await execution.output()
+        error = await execution.error()
+
+        result_parts = []
+        if output and output.strip():
+            result_parts.append(output.strip())
+        if error and error.strip():
+            result_parts.append(f"STDERR: {error.strip()}")
+
+        return (
+            "\n".join(result_parts)
+            if result_parts
+            else "Code executed successfully (no output)"
+        )
+
+    async def _get_command_output(self, execution) -> str:
+        r"""Get output from command execution.
+
+        Args:
+            execution: CommandExecution object from sandbox.command.run().
+
+        Returns:
+            str: Formatted command output.
+        """
+        output = await execution.output()
+        error = await execution.error()
+
+        result_parts = []
+        if output and output.strip():
+            result_parts.append(output.strip())
+        if error and error.strip():
+            result_parts.append(f"STDERR: {error.strip()}")
+        if hasattr(execution, 'exit_code') and execution.exit_code != 0:
+            result_parts.append(f"Exit code: {execution.exit_code}")
+
+        return (
+            "\n".join(result_parts)
+            if result_parts
+            else "Command executed successfully (no output)"
+        )
+
+    def _confirm_execution(self, execution_type: str) -> None:
+        r"""Prompt user for confirmation before executing code or commands.
+
+        Args:
+            execution_type (str): Type of execution ('code' or 'command').
+
+        Raises:
+            InterpreterError: If user declines to run the code/command.
+        """
+        while True:
+            choice = input(f"Running {execution_type}? [Y/n]:").lower()
+            if choice in ["y", "yes", "ye"]:
+                break
+            elif choice not in ["no", "n"]:
+                continue
+            raise InterpreterError(
+                f"Execution halted: User opted not to run the "
+                f"{execution_type}. "
+                f"This choice stops the current operation and any "
+                f"further {execution_type} execution."
+            )
+
+    def supported_code_types(self) -> List[str]:
+        r"""Provides supported code types by the interpreter."""
+        return list(self._CODE_TYPE_MAPPING.keys())
+
+    def update_action_space(self, action_space: Dict[str, Any]) -> None:
+        r"""Updates action space for interpreter.
+
+        Args:
+            action_space: Action space dictionary (unused in microsandbox).
+
+        Note:
+            Microsandbox doesn't support action space updates as it runs
+            in isolated environments for each execution.
+        """
+        # Explicitly acknowledge the parameter to avoid linting warnings
+        _ = action_space
+        logger.warning(
+            "Microsandbox doesn't support action space updates. "
+            "Code runs in isolated environments for each execution."
+        )
+
+    def execute_command(self, command: str) -> Union[str, Tuple[str, str]]:
+        r"""Execute a shell command in the microsandbox.
+
+        This method is designed for package management and system
+        administration tasks. It executes shell commands directly
+        using the microsandbox command interface.
+
+        Args:
+            command (str): The shell command to execute (e.g.,
+                "pip install numpy", "ls -la", "apt-get update").
+
+        Returns:
+            Union[str, Tuple[str, str]]: The output of the command.
+
+        Examples:
+            >>> interpreter.execute_command("pip install numpy")
+            >>> interpreter.execute_command("npm install express")
+            >>> interpreter.execute_command("ls -la /tmp")
+        """
+        # Print command for security checking
+        if self.require_confirm:
+            logger.info(
+                f"The following shell command will run on "
+                f"microsandbox: {command}"
+            )
+            self._confirm_execution("command")
+
+        return asyncio.run(self._execute_command_async(command))
+
+    async def _execute_command_async(self, command: str) -> str:
+        r"""Asynchronously executes a shell command in microsandbox.
+
+        Args:
+            command (str): The shell command to execute.
+
+        Returns:
+            str: The output of the command execution.
+
+        Raises:
+            InterpreterError: If execution fails.
+        """
+        try:
+            async with self._PythonSandbox.create(
+                **self._sandbox_config
+            ) as sandbox:
+                execution = await asyncio.wait_for(
+                    sandbox.command.run("bash", ["-c", command]),
+                    timeout=self.timeout,
+                )
+                return await self._get_command_output(execution)
+
+        except Exception as e:
+            raise InterpreterError(
+                f"Error executing command in microsandbox: {e}"
+            )
+
+    def __del__(self) -> None:
+        r"""Destructor for the MicrosandboxInterpreter class.
+
+        Microsandbox uses context managers for resource management,
+        so no explicit cleanup is needed.
+        """
+        logger.debug("MicrosandboxInterpreter cleaned up")
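A usage sketch for the new interpreter, assuming a microsandbox server is reachable (by default the SDK targets http://127.0.0.1:5555) and MSB_API_KEY is set in the environment:

from camel.interpreters import MicrosandboxInterpreter

interpreter = MicrosandboxInterpreter(require_confirm=False, timeout=60)
print(interpreter.run("print(sum(range(10)))", "python"))
print(interpreter.run("console.log('hello from node')", "javascript"))
print(interpreter.execute_command("ls -la /tmp"))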
camel/loaders/__init__.py
CHANGED
@@ -21,7 +21,6 @@ from .jina_url_reader import JinaURLReader
 from .markitdown import MarkItDownLoader
 from .mineru_extractor import MinerU
 from .mistral_reader import MistralReader
-from .pandas_reader import PandasReader
 from .scrapegraph_reader import ScrapeGraphAI
 from .unstructured_io import UnstructuredIO

@@ -33,7 +32,6 @@ __all__ = [
     'JinaURLReader',
     'Firecrawl',
     'Apify',
-    'PandasReader',
     'ChunkrReader',
     'ChunkrReaderConfig',
     'MinerU',
@@ -42,3 +40,14 @@ __all__ = [
     'ScrapeGraphAI',
     'MistralReader',
 ]
+
+
+def __getattr__(name: str):
+    if name == 'PandasReader':
+        raise ImportError(
+            "PandasReader has been removed from camel.loaders. "
+            "The pandasai dependency limited pandas to version 1.5.3. "
+            "Please use ExcelToolkit from camel.toolkits instead for "
+            "handling structured data."
+        )
+    raise AttributeError(f"module '{__name__}' has no attribute '{name}'")