ms-enclave 0.0.0__py3-none-any.whl → 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ms-enclave might be problematic. Click here for more details.
- ms_enclave/__init__.py +2 -2
- ms_enclave/cli/__init__.py +1 -0
- ms_enclave/cli/base.py +20 -0
- ms_enclave/cli/cli.py +27 -0
- ms_enclave/cli/start_server.py +84 -0
- ms_enclave/sandbox/__init__.py +27 -0
- ms_enclave/sandbox/boxes/__init__.py +16 -0
- ms_enclave/sandbox/boxes/base.py +267 -0
- ms_enclave/sandbox/boxes/docker_notebook.py +216 -0
- ms_enclave/sandbox/boxes/docker_sandbox.py +252 -0
- ms_enclave/sandbox/manager/__init__.py +11 -0
- ms_enclave/sandbox/manager/base.py +155 -0
- ms_enclave/sandbox/manager/http_manager.py +405 -0
- ms_enclave/sandbox/manager/local_manager.py +295 -0
- ms_enclave/sandbox/model/__init__.py +21 -0
- ms_enclave/sandbox/model/base.py +36 -0
- ms_enclave/sandbox/model/config.py +97 -0
- ms_enclave/sandbox/model/requests.py +57 -0
- ms_enclave/sandbox/model/responses.py +57 -0
- ms_enclave/sandbox/server/__init__.py +0 -0
- ms_enclave/sandbox/server/server.py +195 -0
- ms_enclave/sandbox/tools/__init__.py +4 -0
- ms_enclave/sandbox/tools/base.py +95 -0
- ms_enclave/sandbox/tools/sandbox_tool.py +46 -0
- ms_enclave/sandbox/tools/sandbox_tools/__init__.py +4 -0
- ms_enclave/sandbox/tools/sandbox_tools/file_operation.py +215 -0
- ms_enclave/sandbox/tools/sandbox_tools/notebook_executor.py +167 -0
- ms_enclave/sandbox/tools/sandbox_tools/python_executor.py +87 -0
- ms_enclave/sandbox/tools/sandbox_tools/shell_executor.py +63 -0
- ms_enclave/sandbox/tools/tool_info.py +141 -0
- ms_enclave/utils/__init__.py +1 -0
- ms_enclave/utils/json_schema.py +208 -0
- ms_enclave/utils/logger.py +106 -0
- ms_enclave/version.py +2 -2
- ms_enclave-0.0.1.dist-info/METADATA +314 -0
- ms_enclave-0.0.1.dist-info/RECORD +40 -0
- {ms_enclave-0.0.0.dist-info → ms_enclave-0.0.1.dist-info}/WHEEL +1 -1
- ms_enclave-0.0.1.dist-info/entry_points.txt +2 -0
- ms_enclave/run_server.py +0 -21
- ms_enclave-0.0.0.dist-info/METADATA +0 -329
- ms_enclave-0.0.0.dist-info/RECORD +0 -8
- {ms_enclave-0.0.0.dist-info → ms_enclave-0.0.1.dist-info}/licenses/LICENSE +0 -0
- {ms_enclave-0.0.0.dist-info → ms_enclave-0.0.1.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,215 @@
|
|
|
1
|
+
"""File operation tool for reading and writing files."""
|
|
2
|
+
|
|
3
|
+
import io
|
|
4
|
+
import os
|
|
5
|
+
import tarfile
|
|
6
|
+
from typing import TYPE_CHECKING, Literal, Optional
|
|
7
|
+
|
|
8
|
+
from ms_enclave.sandbox.model import ExecutionStatus, SandboxType, ToolResult
|
|
9
|
+
from ms_enclave.sandbox.tools.base import register_tool
|
|
10
|
+
from ms_enclave.sandbox.tools.sandbox_tool import SandboxTool
|
|
11
|
+
from ms_enclave.sandbox.tools.tool_info import ToolParams
|
|
12
|
+
|
|
13
|
+
if TYPE_CHECKING:
|
|
14
|
+
from ms_enclave.sandbox.boxes import Sandbox
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
@register_tool('file_operation')
class FileOperation(SandboxTool):
    """Perform basic file operations (create/read/write/delete/list/exists)
    inside a Docker sandbox.

    Reads, deletes, listings and existence checks are implemented by running
    shell commands in the container; writes stream a tar archive into the
    container so arbitrary content never passes through a shell command line.
    File paths are shell-quoted before interpolation to prevent command
    injection via crafted paths.
    """

    _name = 'file_operation'
    _sandbox_type = SandboxType.DOCKER
    _description = 'Perform file operations like read, write, delete, and list files'
    _parameters = ToolParams(
        type='object',
        properties={
            'operation': {
                'type': 'string',
                'description': 'Type of file operation to perform',
                'enum': ['create', 'read', 'write', 'delete', 'list', 'exists']
            },
            'file_path': {
                'type': 'string',
                'description': 'Path to the file or directory'
            },
            'content': {
                'type': 'string',
                'description': 'Content to write to file (only for write operation)'
            },
            'encoding': {
                'type': 'string',
                'description': 'File encoding',
                'default': 'utf-8'
            }
        },
        required=['operation', 'file_path']
    )

    async def execute(
        self,
        sandbox_context: 'Sandbox',
        operation: str,
        file_path: str,
        content: Optional[str] = None,
        encoding: str = 'utf-8'
    ) -> ToolResult:
        """Dispatch *operation* to the matching helper.

        Args:
            sandbox_context: Sandbox in which the operation runs.
            operation: One of 'create', 'read', 'write', 'delete', 'list', 'exists'.
            file_path: Path to the file or directory inside the container.
            content: Payload for 'write' (required) and 'create' (optional).
            encoding: Encoding used when writing content.

        Returns:
            ToolResult describing success or failure; never raises.
        """
        if not file_path.strip():
            return ToolResult(
                tool_name=self.name, status=ExecutionStatus.ERROR, output='', error='No file path provided'
            )

        try:
            if operation == 'read':
                return await self._read_file(sandbox_context, file_path, encoding)
            elif operation == 'write':
                if content is None:
                    return ToolResult(
                        tool_name=self.name,
                        status=ExecutionStatus.ERROR,
                        output='',
                        error='Content is required for write operation'
                    )
                return await self._write_file(sandbox_context, file_path, content, encoding)
            elif operation == 'delete':
                return await self._delete_file(sandbox_context, file_path)
            elif operation == 'list':
                return await self._list_directory(sandbox_context, file_path)
            elif operation == 'exists':
                return await self._check_exists(sandbox_context, file_path)
            elif operation == 'create':
                # 'create' behaves as a write with empty default content.
                if content is None:
                    content = ''
                return await self._write_file(sandbox_context, file_path, content, encoding)
            else:
                return ToolResult(
                    tool_name=self.name,
                    status=ExecutionStatus.ERROR,
                    output='',
                    error=f'Unknown operation: {operation}'
                )

        except Exception as e:
            return ToolResult(
                tool_name=self.name, status=ExecutionStatus.ERROR, output='', error=f'Operation failed: {str(e)}'
            )

    async def _read_file(self, sandbox_context: 'Sandbox', file_path: str, encoding: str) -> ToolResult:
        """Read file content from the container via `cat`.

        NOTE(review): `encoding` is not applied here — decoding is whatever
        `execute_command` returns; confirm against the sandbox implementation.
        """
        try:
            # shlex.quote prevents paths with spaces/quotes/metacharacters from
            # breaking out of the command line.
            result = await sandbox_context.execute_command(f'cat {shlex.quote(file_path)}')

            if result.exit_code == 0:
                return ToolResult(tool_name=self.name, status=ExecutionStatus.SUCCESS, output=result.stdout, error=None)
            else:
                return ToolResult(
                    tool_name=self.name,
                    status=ExecutionStatus.ERROR,
                    output='',
                    error=result.stderr or f'Failed to read file: {file_path}'
                )
        except Exception as e:
            return ToolResult(
                tool_name=self.name, status=ExecutionStatus.ERROR, output='', error=f'Read failed: {str(e)}'
            )

    async def _write_file(self, sandbox_context: 'Sandbox', file_path: str, content: str, encoding: str) -> ToolResult:
        """Write *content* to *file_path* in the container, creating parent dirs."""
        try:
            # Create directory if it doesn't exist
            dir_path = os.path.dirname(file_path)
            if dir_path:
                await sandbox_context.execute_command(f'mkdir -p {shlex.quote(dir_path)}')

            # Write file using a tar archive (same mechanism as python_executor);
            # the requested encoding is honored here.
            await self._write_file_to_container(sandbox_context, file_path, content, encoding)

            return ToolResult(
                tool_name=self.name,
                status=ExecutionStatus.SUCCESS,
                output=f'File written successfully: {file_path}',
                error=None
            )
        except Exception as e:
            return ToolResult(
                tool_name=self.name, status=ExecutionStatus.ERROR, output='', error=f'Write failed: {str(e)}'
            )

    async def _delete_file(self, sandbox_context: 'Sandbox', file_path: str) -> ToolResult:
        """Delete a file or directory (recursively) from the container."""
        try:
            result = await sandbox_context.execute_command(f'rm -rf {shlex.quote(file_path)}')

            if result.exit_code == 0:
                return ToolResult(
                    tool_name=self.name,
                    status=ExecutionStatus.SUCCESS,
                    output=f'Successfully deleted: {file_path}',
                    error=None
                )
            else:
                return ToolResult(
                    tool_name=self.name,
                    status=ExecutionStatus.ERROR,
                    output='',
                    error=result.stderr or f'Failed to delete: {file_path}'
                )
        except Exception as e:
            return ToolResult(
                tool_name=self.name, status=ExecutionStatus.ERROR, output='', error=f'Delete failed: {str(e)}'
            )

    async def _list_directory(self, sandbox_context: 'Sandbox', dir_path: str) -> ToolResult:
        """List contents of a directory with `ls -la`."""
        try:
            result = await sandbox_context.execute_command(f'ls -la {shlex.quote(dir_path)}')

            if result.exit_code == 0:
                return ToolResult(tool_name=self.name, status=ExecutionStatus.SUCCESS, output=result.stdout, error=None)
            else:
                return ToolResult(
                    tool_name=self.name,
                    status=ExecutionStatus.ERROR,
                    output='',
                    error=result.stderr or f'Failed to list directory: {dir_path}'
                )
        except Exception as e:
            return ToolResult(
                tool_name=self.name, status=ExecutionStatus.ERROR, output='', error=f'List failed: {str(e)}'
            )

    async def _check_exists(self, sandbox_context: 'Sandbox', file_path: str) -> ToolResult:
        """Check whether a file or directory exists (`test -e`)."""
        try:
            result = await sandbox_context.execute_command(f'test -e {shlex.quote(file_path)}')

            exists = result.exit_code == 0
            return ToolResult(
                tool_name=self.name,
                status=ExecutionStatus.SUCCESS,
                output=f'{"exists" if exists else "does not exist"}',
                error=None
            )
        except Exception as e:
            return ToolResult(
                tool_name=self.name, status=ExecutionStatus.ERROR, output='', error=f'Exists check failed: {str(e)}'
            )

    async def _write_file_to_container(
        self, sandbox_context: 'Sandbox', file_path: str, content: str, encoding: str = 'utf-8'
    ) -> None:
        """Write *content* to *file_path* in the container using a tar archive.

        Context managers guarantee the in-memory stream and tar object are
        closed even on failure (the previous version leaked both on error).
        """
        with io.BytesIO() as tar_stream:
            with tarfile.TarFile(fileobj=tar_stream, mode='w') as tar:
                # Honor the caller-requested encoding instead of hardcoding utf-8.
                file_data = content.encode(encoding)
                tarinfo = tarfile.TarInfo(name=os.path.basename(file_path))
                tarinfo.size = len(file_data)
                tar.addfile(tarinfo, io.BytesIO(file_data))

            # Rewind and upload the finished archive into the target directory.
            tar_stream.seek(0)
            sandbox_context.container.put_archive(os.path.dirname(file_path), tar_stream.getvalue())
|
|
@@ -0,0 +1,167 @@
|
|
|
1
|
+
"""Notebook code execution tool for Jupyter kernels."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import time
|
|
5
|
+
import uuid
|
|
6
|
+
from typing import TYPE_CHECKING, Optional
|
|
7
|
+
|
|
8
|
+
from ms_enclave.sandbox.model import CommandResult, ExecutionStatus, SandboxType, ToolResult
|
|
9
|
+
from ms_enclave.sandbox.tools.base import Tool, register_tool
|
|
10
|
+
from ms_enclave.sandbox.tools.sandbox_tool import SandboxTool
|
|
11
|
+
from ms_enclave.sandbox.tools.tool_info import ToolParams
|
|
12
|
+
from ms_enclave.utils import get_logger
|
|
13
|
+
|
|
14
|
+
if TYPE_CHECKING:
|
|
15
|
+
from ms_enclave.sandbox.boxes import Sandbox
|
|
16
|
+
|
|
17
|
+
logger = get_logger()
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
@register_tool('notebook_executor')
class NotebookExecutor(SandboxTool):
    """Execute Python code in the Jupyter kernel of a notebook-enabled sandbox.

    The sandbox is expected to expose an open websocket (``ws``) to a running
    Jupyter kernel (``kernel_id``). Code is submitted as an ``execute_request``
    message and the IOPub reply stream is consumed until the kernel reports
    idle, an error arrives, or the timeout elapses.
    """

    _name = 'notebook_executor'
    _sandbox_type = SandboxType.DOCKER_NOTEBOOK
    _description = 'Execute Python code in a Jupyter kernel environment'
    _parameters = ToolParams(
        type='object',
        properties={
            'code': {
                'type': 'string',
                'description': 'Python code to execute in the notebook kernel'
            },
            'timeout': {
                'type': 'integer',
                'description': 'Execution timeout in seconds',
                'default': 30
            }
        },
        required=['code']
    )

    async def execute(self, sandbox_context: 'Sandbox', code: str, timeout: Optional[int] = 30) -> ToolResult:
        """Execute Python code in the Jupyter kernel.

        Returns a ToolResult whose output is the kernel's stream output (plus
        the final expression result, if any); failures carry the traceback or
        transport error message.
        """
        if not code.strip():
            return ToolResult(tool_name=self.name, status=ExecutionStatus.ERROR, output='', error='No code provided')

        try:
            # Execute code using the sandbox's Jupyter kernel
            result = await self._execute_in_kernel(sandbox_context, code, timeout)

            status = ExecutionStatus.SUCCESS if result.exit_code == 0 else ExecutionStatus.ERROR

            return ToolResult(
                tool_name=self.name,
                status=status,
                output=result.stdout,
                error=result.stderr if result.stderr else None
            )

        except Exception as e:
            return ToolResult(
                tool_name=self.name, status=ExecutionStatus.ERROR, output='', error=f'Execution failed: {str(e)}'
            )

    async def _execute_in_kernel(self, sandbox_context: 'Sandbox', code: str, timeout: Optional[int]) -> CommandResult:
        """Execute code in the Jupyter kernel via websocket.

        Raises:
            RuntimeError: if the sandbox has no websocket/kernel, or the kernel
                is not ready.
        """
        # Check if sandbox has the required Jupyter components
        if not hasattr(sandbox_context, 'ws') or not hasattr(sandbox_context, 'kernel_id'):
            raise RuntimeError('Sandbox does not have Jupyter kernel setup')

        if not sandbox_context.ws or not sandbox_context.kernel_id:
            raise RuntimeError('Jupyter kernel is not ready')

        # Send execute request
        msg_id = self._send_execute_request(sandbox_context, code)

        outputs = []
        result = None
        error_occurred = False
        error_msg = ''
        actual_timeout = timeout or 30
        start_time = time.time()

        while True:
            elapsed = time.time() - start_time
            if elapsed >= actual_timeout:
                error_occurred = True
                error_msg = f'Execution timed out after {actual_timeout} seconds'
                logger.error(error_msg)
                break

            try:
                # Keep the recv timeout short so the overall deadline above is
                # re-checked at least once per second.
                sandbox_context.ws.settimeout(min(1, actual_timeout - elapsed))
                msg = json.loads(sandbox_context.ws.recv())
                parent_msg_id = msg.get('parent_header', {}).get('msg_id')

                # Skip messages that belong to other requests on this socket.
                if parent_msg_id != msg_id:
                    continue

                msg_type = msg.get('msg_type', '')
                msg_content = msg.get('content', {})

                if msg_type == 'stream':
                    outputs.append(msg_content['text'])
                elif msg_type == 'execute_result':
                    result = msg_content['data'].get('text/plain', '')
                elif msg_type == 'error':
                    error_occurred = True
                    error_msg = '\n'.join(msg_content.get('traceback', []))
                    outputs.append(error_msg)
                elif msg_type == 'status' and msg_content.get('execution_state') == 'idle':
                    # Kernel finished processing our request.
                    break

            except Exception as e:
                # Bug fix: a recv timeout is *expected* while the kernel is
                # quiet (we deliberately set a short socket timeout); it used to
                # abort the whole execution as an error. Loop back and let the
                # deadline check decide instead. Matched by exception class name
                # to avoid importing the websocket library here.
                if 'Timeout' in type(e).__name__:
                    continue
                logger.error(f'Error receiving message: {e}')
                error_occurred = True
                error_msg = str(e)
                break

        output_text = ''.join(outputs)
        if result:
            output_text += f'\nResult: {result}'
        if error_msg and error_msg not in output_text:
            output_text += f'\n{error_msg}'

        return CommandResult(
            status=ExecutionStatus.SUCCESS if not error_occurred else ExecutionStatus.ERROR,
            command=code,
            exit_code=1 if error_occurred else 0,
            stdout=output_text if not error_occurred else '',
            stderr=output_text if error_occurred else ''
        )

    def _send_execute_request(self, sandbox_context: 'Sandbox', code: str) -> str:
        """Send a Jupyter ``execute_request`` message and return its msg_id."""
        # Generate a unique message ID
        msg_id = str(uuid.uuid4())

        # Create execute request (Jupyter wire protocol v5.0)
        execute_request = {
            'header': {
                'msg_id': msg_id,
                'username': 'anonymous',
                'session': str(uuid.uuid4()),
                'msg_type': 'execute_request',
                'version': '5.0',
            },
            'parent_header': {},
            'metadata': {},
            'content': {
                'code': code,
                'silent': False,
                'store_history': True,
                'user_expressions': {},
                'allow_stdin': False,
            },
        }

        sandbox_context.ws.send(json.dumps(execute_request))
        return msg_id
|
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
"""Python code execution tool."""
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
import uuid
|
|
5
|
+
from typing import TYPE_CHECKING, Optional
|
|
6
|
+
|
|
7
|
+
from ms_enclave.sandbox.model import ExecutionStatus, SandboxType, ToolResult
|
|
8
|
+
from ms_enclave.sandbox.tools.base import Tool, register_tool
|
|
9
|
+
from ms_enclave.sandbox.tools.sandbox_tool import SandboxTool
|
|
10
|
+
from ms_enclave.sandbox.tools.tool_info import ToolParams
|
|
11
|
+
|
|
12
|
+
if TYPE_CHECKING:
|
|
13
|
+
from ms_enclave.sandbox.boxes import DockerSandbox
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
@register_tool('python_executor')
class PythonExecutor(SandboxTool):
    """Execute Python code inside a Docker sandbox.

    The code is copied into the container as a temporary script (via a tar
    archive, which avoids shell command-length and escaping limits) and run
    with the container's ``python`` interpreter. The script is removed again
    after execution.
    """

    _name = 'python_executor'
    _sandbox_type = SandboxType.DOCKER
    _description = 'Execute Python code in an isolated environment using IPython'
    _parameters = ToolParams(
        type='object',
        properties={
            'code': {
                'type': 'string',
                'description': 'Python code to execute'
            },
            'timeout': {
                'type': 'integer',
                'description': 'Execution timeout in seconds',
                'default': 30
            }
        },
        required=['code']
    )

    async def execute(self, sandbox_context: 'DockerSandbox', code: str, timeout: Optional[int] = 30) -> ToolResult:
        """Execute Python code by writing it to a temporary file and running it.

        Returns a ToolResult with the interpreter's stdout/stderr; never raises.
        """
        if not code.strip():
            return ToolResult(tool_name=self.name, status=ExecutionStatus.ERROR, output='', error='No code provided')

        # Unique name so concurrent executions in one container cannot collide.
        script_path = f'/tmp/exec_script_{uuid.uuid4().hex}.py'

        try:
            # Write script to container to avoid long command-line errors
            await self._write_file_to_container(sandbox_context, script_path, code)

            # Execute using python
            result = await sandbox_context.execute_command(f'python {script_path}', timeout=timeout)

            status = ExecutionStatus.SUCCESS if result.exit_code == 0 else ExecutionStatus.ERROR

            return ToolResult(
                tool_name=self.name,
                status=status,
                output=result.stdout,
                error=result.stderr if result.stderr else None
            )
        except Exception as e:
            return ToolResult(
                tool_name=self.name, status=ExecutionStatus.ERROR, output='', error=f'Execution failed: {str(e)}'
            )
        finally:
            # Bug fix: the temp script used to be left behind in the container,
            # accumulating one file per execution. Cleanup is best-effort so a
            # cleanup failure never masks the real execution result.
            try:
                await sandbox_context.execute_command(f'rm -f {script_path}')
            except Exception:
                pass

    async def _write_file_to_container(self, sandbox_context: 'DockerSandbox', file_path: str, content: str) -> None:
        """Write *content* to *file_path* in the container via an in-memory tar archive."""
        import io
        import tarfile

        # Create a tar archive in memory using context managers
        with io.BytesIO() as tar_stream:
            with tarfile.TarFile(fileobj=tar_stream, mode='w') as tar:
                file_data = content.encode('utf-8')
                tarinfo = tarfile.TarInfo(name=os.path.basename(file_path))
                tarinfo.size = len(file_data)
                tar.addfile(tarinfo, io.BytesIO(file_data))

            # Reset stream position and put archive into container
            tar_stream.seek(0)
            sandbox_context.container.put_archive(os.path.dirname(file_path), tar_stream.getvalue())
|
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
"""Shell command execution tool."""
|
|
2
|
+
|
|
3
|
+
from typing import TYPE_CHECKING, Optional
|
|
4
|
+
|
|
5
|
+
from ms_enclave.sandbox.model import ExecutionStatus, SandboxType, ToolResult
|
|
6
|
+
from ms_enclave.sandbox.tools.base import register_tool
|
|
7
|
+
from ms_enclave.sandbox.tools.sandbox_tool import SandboxTool
|
|
8
|
+
from ms_enclave.sandbox.tools.tool_info import ToolParams
|
|
9
|
+
|
|
10
|
+
if TYPE_CHECKING:
|
|
11
|
+
from ms_enclave.sandbox.boxes import Sandbox
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
@register_tool('shell_executor')
class ShellExecutor(SandboxTool):
    """Run a shell command inside the sandbox and wrap the outcome in a ToolResult."""

    _name = 'shell_executor'
    _sandbox_type = SandboxType.DOCKER
    _description = 'Execute shell commands in an isolated environment'
    _parameters = ToolParams(
        type='object',
        properties={
            'command': {
                'type': 'string',
                'description': 'Shell command to execute'
            },
            'timeout': {
                'type': 'integer',
                'description': 'Execution timeout in seconds',
                'default': 30
            }
        },
        required=['command']
    )

    async def execute(self, sandbox_context: 'Sandbox', command: str, timeout: Optional[int] = 30) -> ToolResult:
        """Execute shell command in the Docker container.

        Blank commands are rejected up front; transport/runtime failures are
        reported as an error result rather than raised.
        """
        # Guard clause: nothing to run.
        if not command.strip():
            return ToolResult(tool_name=self.name, status=ExecutionStatus.ERROR, output='', error='No command provided')

        try:
            outcome = await sandbox_context.execute_command(command, timeout=timeout)

            succeeded = outcome.exit_code == 0
            # stderr (when present) always wins as the error text; otherwise a
            # failed command gets a synthesized message and success gets None.
            if outcome.stderr:
                error_text = outcome.stderr
            elif succeeded:
                error_text = None
            else:
                error_text = f'Command failed with exit code {outcome.exit_code}'

            return ToolResult(
                tool_name=self.name,
                status=ExecutionStatus.SUCCESS if succeeded else ExecutionStatus.ERROR,
                output=outcome.stdout,
                error=error_text
            )

        except Exception as e:
            return ToolResult(
                tool_name=self.name, status=ExecutionStatus.ERROR, output='', error=f'Execution failed: {str(e)}'
            )
|
|
@@ -0,0 +1,141 @@
|
|
|
1
|
+
import inspect
|
|
2
|
+
from dataclasses import dataclass
|
|
3
|
+
from typing import Any, Callable, Dict, List, Literal, Optional, TypeAlias, Union, get_args, get_type_hints
|
|
4
|
+
|
|
5
|
+
from docstring_parser import Docstring, parse
|
|
6
|
+
from pydantic import BaseModel, Field
|
|
7
|
+
|
|
8
|
+
from ms_enclave.utils.json_schema import JSONSchema, JSONType, json_schema, python_type_to_json_type
|
|
9
|
+
|
|
10
|
+
ToolParam: TypeAlias = JSONSchema
|
|
11
|
+
"""Description of tool parameter in JSON Schema format."""
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class ToolParams(BaseModel):
    """Description of tool parameters object in JSON Schema format."""

    # JSON Schema type marker: a tool's parameter container is always an object.
    type: Literal['object'] = Field(default='object', description="Params type (always 'object')")
    # Maps each parameter name to the JSON Schema describing it.
    properties: Dict[str, ToolParam] = Field(default_factory=dict, description='Tool function parameters.')
    # Names of parameters the caller must supply (no default value).
    required: List[str] = Field(default_factory=list, description='List of required fields.')
    # Fixed to False per the schema contract declared in the field description.
    additionalProperties: bool = Field(
        default=False, description='Are additional object properties allowed? (always `False`)'
    )
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class ToolInfo(BaseModel):
    """Specification of a tool (JSON Schema compatible)

    If you are implementing a ModelAPI, most LLM libraries can
    be passed this object (dumped to a dict) directly as a function
    specification. For example, in the OpenAI provider:

    ```python
    ChatCompletionToolParam(
        type="function",
        function=tool.model_dump(exclude_none=True),
    )
    ```

    In some cases the field names don't match up exactly. In that case
    call `model_dump()` on the `parameters` field. For example, in the
    Anthropic provider:

    ```python
    ToolParam(
        name=tool.name,
        description=tool.description,
        input_schema=tool.parameters.model_dump(exclude_none=True),
    )
    ```
    """

    # Identifier the model uses to select this tool.
    name: str = Field(description='Name of tool.')
    # Human/model-readable summary of what the tool does.
    description: str = Field(description='Short description of tool.')
    # JSON Schema object describing the tool's call arguments.
    parameters: ToolParams = Field(default_factory=ToolParams, description='JSON Schema of tool parameters object.')
    # Provider-specific extras; None unless a provider needs customization.
    options: Optional[Dict[str, object]] = Field(
        default=None,
        description=
        'Optional property bag that can be used by the model provider to customize the implementation of the tool'
    )
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
def parse_tool_info(func: Callable[..., Any]) -> ToolInfo:
    """Build a ToolInfo spec for *func* from registry attributes, type hints
    and its docstring.

    If the callable already carries registry attributes (``name``,
    ``description``, ``parameters``) they are trusted verbatim. Otherwise the
    signature is introspected: type hints take precedence for parameter
    schemas, with docstring-declared types as a legacy fallback; parameters
    without defaults become required.
    """
    # tool may already have registry attributes w/ tool info
    if (getattr(func, 'name', None) and getattr(func, 'description', None) and getattr(func, 'parameters', None)):
        return ToolInfo(
            name=func.name,
            description=func.description,
            parameters=func.parameters,
        )

    signature = inspect.signature(func)
    type_hints = get_type_hints(func)
    docstring = inspect.getdoc(func)
    # Parse the docstring exactly once; the previous implementation re-parsed
    # it for every parameter via parse_docstring().
    parsed_docstring: Optional[Docstring] = parse(docstring) if docstring else None

    info = ToolInfo(name=func.__name__, description='')

    for param_name, param in signature.parameters.items():
        tool_param = ToolParam()

        # Per-parameter description/type recorded in the (already parsed) docstring.
        docstring_info = _param_docstring_info(parsed_docstring, param_name)

        # Get type information from type annotations
        if param_name in type_hints:
            tool_param = json_schema(type_hints[param_name])
        # as a fallback try to parse it from the docstring
        # (this is minimally necessary for backwards compatiblity
        # with tools gen1 type parsing, which only used docstrings)
        elif 'docstring_type' in docstring_info:
            json_type = python_type_to_json_type(docstring_info['docstring_type'])
            if json_type and (json_type in get_args(JSONType)):
                tool_param = ToolParam(type=json_type)

        # Parameters without a default value are required; otherwise record the
        # default on the schema.
        if param.default is param.empty:
            info.parameters.required.append(param_name)
        else:
            tool_param.default = param.default

        # Add description from docstring
        if 'description' in docstring_info:
            tool_param.description = docstring_info['description']

        # append the tool param
        info.parameters.properties[param_name] = tool_param

    # Add function description if available
    if parsed_docstring:
        if parsed_docstring.description:
            info.description = parsed_docstring.description.strip()
        elif parsed_docstring.long_description:
            info.description = parsed_docstring.long_description.strip()
        elif parsed_docstring.short_description:
            info.description = parsed_docstring.short_description.strip()

        # Add examples if available
        if parsed_docstring.examples:
            examples = '\n\n'.join([(example.description or '') for example in parsed_docstring.examples])
            info.description = f'{info.description}\n\nExamples\n\n{examples}'

    return info


def _param_docstring_info(parsed_docstring: Optional[Docstring], param_name: str) -> Dict[str, str]:
    """Extract description (and optional declared type) for *param_name* from a pre-parsed docstring."""
    if not parsed_docstring:
        return {}

    for param in parsed_docstring.params:
        if param.arg_name == param_name:
            schema: Dict[str, str] = {'description': param.description or ''}

            if param.type_name:
                schema['docstring_type'] = param.type_name

            return schema

    return {}
|
|
124
|
+
|
|
125
|
+
|
|
126
|
+
def parse_docstring(docstring: Optional[str], param_name: str) -> Dict[str, str]:
    """Return the documented description (and declared type, if any) for *param_name*.

    Yields an empty dict when there is no docstring or the parameter is not
    documented in it.
    """
    if not docstring:
        return {}

    parsed: Docstring = parse(docstring)
    matched = next((p for p in parsed.params if p.arg_name == param_name), None)
    if matched is None:
        return {}

    schema: Dict[str, str] = {'description': matched.description or ''}
    if matched.type_name:
        schema['docstring_type'] = matched.type_name
    return schema
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
from .logger import get_logger
|