lm-deluge 0.0.88__py3-none-any.whl → 0.0.90__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lm-deluge might be problematic.
- lm_deluge/__init__.py +0 -24
- lm_deluge/api_requests/anthropic.py +25 -5
- lm_deluge/api_requests/base.py +37 -0
- lm_deluge/api_requests/bedrock.py +23 -2
- lm_deluge/api_requests/gemini.py +36 -10
- lm_deluge/api_requests/openai.py +31 -4
- lm_deluge/batches.py +15 -45
- lm_deluge/client.py +27 -1
- lm_deluge/models/__init__.py +2 -0
- lm_deluge/models/anthropic.py +12 -12
- lm_deluge/models/google.py +13 -0
- lm_deluge/models/minimax.py +9 -1
- lm_deluge/models/openrouter.py +48 -0
- lm_deluge/models/zai.py +50 -1
- lm_deluge/pipelines/gepa/docs/samples.py +19 -10
- lm_deluge/prompt.py +333 -68
- lm_deluge/server/__init__.py +24 -0
- lm_deluge/server/__main__.py +144 -0
- lm_deluge/server/adapters.py +369 -0
- lm_deluge/server/app.py +388 -0
- lm_deluge/server/auth.py +71 -0
- lm_deluge/server/model_policy.py +215 -0
- lm_deluge/server/models_anthropic.py +172 -0
- lm_deluge/server/models_openai.py +175 -0
- lm_deluge/skills/anthropic.py +0 -0
- lm_deluge/skills/compat.py +0 -0
- lm_deluge/tool/__init__.py +13 -1
- lm_deluge/tool/prefab/sandbox/__init__.py +19 -0
- lm_deluge/tool/prefab/sandbox/daytona_sandbox.py +483 -0
- lm_deluge/tool/prefab/sandbox/docker_sandbox.py +609 -0
- lm_deluge/tool/prefab/sandbox/fargate_sandbox.py +546 -0
- lm_deluge/tool/prefab/sandbox/modal_sandbox.py +469 -0
- lm_deluge/tool/prefab/sandbox/seatbelt_sandbox.py +827 -0
- lm_deluge/tool/prefab/skills.py +0 -0
- {lm_deluge-0.0.88.dist-info → lm_deluge-0.0.90.dist-info}/METADATA +4 -3
- {lm_deluge-0.0.88.dist-info → lm_deluge-0.0.90.dist-info}/RECORD +39 -24
- lm_deluge/mock_openai.py +0 -643
- lm_deluge/tool/prefab/sandbox.py +0 -1621
- {lm_deluge-0.0.88.dist-info → lm_deluge-0.0.90.dist-info}/WHEEL +0 -0
- {lm_deluge-0.0.88.dist-info → lm_deluge-0.0.90.dist-info}/licenses/LICENSE +0 -0
- {lm_deluge-0.0.88.dist-info → lm_deluge-0.0.90.dist-info}/top_level.txt +0 -0
lm_deluge/tool/prefab/sandbox/daytona_sandbox.py (new file)
@@ -0,0 +1,483 @@
import os
import secrets
import time
from dataclasses import dataclass, field
from typing import Any

from lm_deluge.tool import Tool


@dataclass
class TrackedProcess:
    """Tracks a process running in the sandbox."""

    process: Any
    name: str
    command: str
    started_at: float = field(default_factory=time.time)


class DaytonaSandbox:
    def __init__(
        self,
        api_key: str | None = None,
        api_url: str | None = None,
        target: str | None = None,
        sandbox_id: str | None = None,
        language: str = "python",
        auto_start: bool = True,
        stateful: bool = False,
    ):
        """
        Initialize a Daytona sandbox.

        Args:
            api_key: Daytona API key (if None, will look for DAYTONA_API_KEY env var)
            api_url: Daytona API URL (if None, will look for DAYTONA_API_URL env var)
            target: Daytona target (if None, will look for DAYTONA_TARGET env var)
            sandbox_id: ID of existing sandbox to connect to (if None, creates a new one)
            language: Programming language for the sandbox (default: python)
            auto_start: Whether to automatically start the sandbox if stopped
            stateful: If True, shell state (variables, cd, functions) persists between commands
        """

        self.api_key = api_key or os.getenv("DAYTONA_API_KEY")
        self.api_url = api_url or os.getenv("DAYTONA_API_URL")
        self.target = target or os.getenv("DAYTONA_TARGET")
        self.sandbox_id = sandbox_id
        self.language = language
        self.auto_start = auto_start
        self.stateful = stateful
        self.sandbox = None
        self.client = None
        self._initialized = False
        self._destroyed = False

        # Stateful mode: session for persistent shell state
        self._session_id: str | None = None
        self._session_initialized = False

    async def __aenter__(self):
        """Async context manager entry - initialize sandbox."""
        await self._ensure_initialized()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Async context manager exit - cleanup sandbox."""
        if not self._destroyed:
            await self._destroy()
        return False

    def __del__(self):
        """Cleanup sandbox when garbage collected (backup cleanup).

        Note: This attempts sync cleanup which may not work perfectly for async resources.
        Prefer using 'async with' for guaranteed cleanup.
        """
        if not self._destroyed and self.sandbox:
            import warnings

            warnings.warn(
                "DaytonaSandbox was not properly cleaned up. "
                "Use 'async with DaytonaSandbox(...) as sandbox:' for automatic cleanup.",
                ResourceWarning,
                stacklevel=2,
            )

    async def _ensure_initialized(self):
        """Lazy initialization of sandbox"""
        if self._initialized:
            return

        from daytona_sdk import (  # type: ignore
            AsyncDaytona,
            CreateSandboxBaseParams,
            DaytonaConfig,
        )

        # Initialize client with config
        if self.api_key or self.api_url or self.target:
            config = DaytonaConfig(
                api_key=self.api_key, api_url=self.api_url, target=self.target
            )
            self.client = AsyncDaytona(config)
        else:
            # Use environment variables
            self.client = AsyncDaytona()

        if self.sandbox_id:
            # Connect to existing sandbox - use find_one with id label
            sandboxes = await self.client.list(labels={"id": self.sandbox_id})
            if not sandboxes or not sandboxes.items:
                raise ValueError(f"Sandbox with ID {self.sandbox_id} not found")
            self.sandbox = sandboxes.items[0]
        else:
            # Create new sandbox with default configuration
            params = CreateSandboxBaseParams(language=self.language)  # type: ignore
            self.sandbox = await self.client.create(params)  # type: ignore
            self.sandbox_id = self.sandbox.id

        # Start sandbox if needed
        if self.auto_start and self.sandbox.state != "started":
            await self.sandbox.start()

        self._initialized = True

    async def _ensure_session_started(self):
        """Start the session for stateful mode if not already running."""
        if self._session_initialized:
            return

        await self._ensure_initialized()
        assert self.sandbox, "no sandbox"

        # Generate a unique session ID
        self._session_id = f"shell-{secrets.token_hex(8)}"
        await self.sandbox.process.create_session(self._session_id)
        self._session_initialized = True

    async def _exec_stateful(
        self,
        command: str,
        timeout: int | None = None,
    ) -> str:
        """Execute a command in the persistent session (stateful mode)."""
        from daytona_sdk import SessionExecuteRequest  # type: ignore

        await self._ensure_session_started()
        assert self.sandbox, "no sandbox"
        assert self._session_id, "no session"

        # Execute command in session
        result = await self.sandbox.process.execute_session_command(
            self._session_id,
            SessionExecuteRequest(command=command, run_async=False),  # type: ignore
        )

        # Get output from stdout (may have some control chars at start)
        output = result.stdout or ""
        # Clean up any leading control characters
        output = output.lstrip("\x01\x02\x03")

        # Include exit code if non-zero
        if result.exit_code != 0:
            output = f"[Exit code: {result.exit_code}]\n{output}"

        # Truncate if needed
        if len(output) > 5000:
            output = "...[truncated]...\n" + output[-5000:]

        return output.strip() if output.strip() else "(no output)"

    async def _exec(
        self,
        command: str,
        timeout: int | None = 120000,
        run_in_background: bool = False,
        name: str | None = None,
        description: str | None = None,
    ) -> str:
        """
        Execute a shell command in the sandbox.

        Args:
            command: Shell command to execute
            timeout: Timeout in milliseconds (default: 120000 = 2 minutes, max: 600000)
            run_in_background: If True, run in background and return immediately
            name: Name for background process (auto-generated if not provided)
            description: Short description of what this command does (for logging)

        Returns:
            Command output if foreground, or confirmation message if background
        """
        await self._ensure_initialized()

        # Convert timeout from milliseconds to seconds for Daytona API
        timeout_seconds: int | None = None
        if timeout is not None and not run_in_background:
            timeout_seconds = min(timeout // 1000, 600)  # Cap at 10 minutes

        # Use stateful mode for foreground commands when enabled
        if self.stateful and not run_in_background:
            return await self._exec_stateful(command, timeout=timeout_seconds)

        # Stateless mode: use process.exec
        assert self.sandbox, "no sandbox"
        result = await self.sandbox.process.exec(
            command=command, cwd=".", timeout=timeout_seconds
        )

        # ExecutionResponse has .result (output) and .exit_code
        output = result.result or ""

        # Include exit code if non-zero
        if result.exit_code != 0:
            output = f"[Exit code: {result.exit_code}]\n{output}"

        # Limit output to last 5000 characters to avoid overwhelming the LLM
        if len(output) > 5000:
            output = "...[truncated]...\n" + output[-5000:]

        return output or "(no output)"

    async def _read_file(self, path: str, max_size: int = 50000) -> str:
        """
        Read a file from the sandbox.

        Args:
            path: Path to the file in the sandbox
            max_size: Maximum file size in bytes to read

        Returns:
            File contents as string
        """
        await self._ensure_initialized()

        # API: download_file(remote_path, timeout=1800) -> bytes
        assert self.sandbox, "no sandbox"
        content_bytes = await self.sandbox.fs.download_file(path)
        content = content_bytes.decode("utf-8", errors="replace")

        if len(content) > max_size:
            return f"File too large ({len(content)} bytes). First {max_size} bytes:\n{content[:max_size]}"

        return content

    async def _write_file(self, path: str, content: str) -> str:
        """
        Write content to a file in the sandbox.

        Args:
            path: Path to the file in the sandbox
            content: Content to write

        Returns:
            Success message
        """
        await self._ensure_initialized()
        assert self.sandbox, "no sandbox"

        # API: upload_file(file: bytes, remote_path: str, timeout=1800) -> None
        content_bytes = content.encode("utf-8")
        await self.sandbox.fs.upload_file(content_bytes, path)
        return f"Successfully wrote {len(content)} bytes to {path}"

    async def _list_files(self, path: str = ".", pattern: str | None = None) -> str:
        """
        List files in a directory.

        Args:
            path: Directory path to list
            pattern: Optional glob pattern to filter files

        Returns:
            Formatted list of files
        """
        await self._ensure_initialized()
        assert self.sandbox, "no sandbox"

        if pattern:
            # API: find_files(path, pattern) -> List[Match]
            matches = await self.sandbox.fs.find_files(path=path, pattern=pattern)
            if not matches:
                return f"No files matching '{pattern}' found in {path}"

            # Format the matches
            files = [match.file for match in matches]
            return "\n".join(files)
        else:
            # API: list_files(path) -> List[FileInfo]
            file_infos = await self.sandbox.fs.list_files(path=path)

            if not file_infos:
                return f"No files found in {path}"

            # Format the output with file info
            lines = []
            for info in file_infos:
                # FileInfo has .name, .size, .mode, .is_dir, etc
                if info.is_dir:
                    lines.append(f"{info.name}/")
                else:
                    lines.append(f"{info.name} ({info.size} bytes)")
            return "\n".join(lines)

    async def _get_preview_link(self, port: int = 8080) -> str:
        """
        Get a preview link for exposing a port.

        Args:
            port: Port number to expose

        Returns:
            Preview URL and token information
        """
        await self._ensure_initialized()
        assert self.sandbox, "no sandbox"
        preview = await self.sandbox.get_preview_link(port)

        result = f"URL: {preview.url}"
        if hasattr(preview, "token") and preview.token:
            result += f"\nToken: {preview.token}"

        return result

    async def _get_working_dir(self) -> str:
        """Get the current working directory in the sandbox."""
        await self._ensure_initialized()
        assert self.sandbox, "no sandbox"
        return await self.sandbox.get_work_dir()

    async def _destroy(self):
        """Delete the sandbox and clean up resources."""
        if self.sandbox and not self._destroyed:
            # Clean up session if in stateful mode
            if self._session_initialized and self._session_id:
                try:
                    await self.sandbox.process.delete_session(self._session_id)
                except Exception:
                    pass
                self._session_id = None
                self._session_initialized = False

            await self.sandbox.delete()
            self._destroyed = True
            self._initialized = False
            self.sandbox = None

    def get_tools(self):
        """Return list of tools for LLM use."""
        if self.stateful:
            bash_description = (
                "Execute a bash command in the Daytona sandbox environment. "
                "This sandbox maintains state between commands - shell variables, "
                "working directory (cd), and functions persist across calls. "
                "Output is truncated to the last 5000 characters if longer."
            )
        else:
            bash_description = (
                "Execute a bash command in the Daytona sandbox environment. "
                "Each command runs in a fresh shell (no state persistence between commands). "
                "Output is truncated to the last 5000 characters if longer."
            )

        bash_tool = Tool(
            name="bash",
            description=bash_description,
            run=self._exec,
            parameters={
                "command": {
                    "type": "string",
                    "description": "Shell command to execute (e.g., 'ls -la', 'python script.py')",
                },
                "description": {
                    "type": "string",
                    "description": "Short description of what this command does (5-10 words)",
                },
                "timeout": {
                    "type": "integer",
                    "description": "Timeout in milliseconds (default: 120000, max: 600000)",
                },
            },
            required=["command"],
        )

        read_file_tool = Tool(
            name="read_file",
            description=(
                "Read the contents of a file from the sandbox filesystem. "
                "Provide the absolute or relative path to the file. "
                "Files larger than 50KB are truncated."
            ),
            run=self._read_file,
            parameters={
                "path": {
                    "type": "string",
                    "description": "Path to the file to read (e.g., '/home/user/script.py')",
                },
                "max_size": {
                    "type": "integer",
                    "description": "Maximum file size in bytes to read (default: 50000)",
                },
            },
            required=["path"],
        )

        write_file_tool = Tool(
            name="write_file",
            description=(
                "Write content to a file in the sandbox filesystem. "
                "Creates the file if it doesn't exist, overwrites if it does. "
                "Parent directories must exist."
            ),
            run=self._write_file,
            parameters={
                "path": {
                    "type": "string",
                    "description": "Path where to write the file (e.g., '/home/user/script.py')",
                },
                "content": {
                    "type": "string",
                    "description": "Content to write to the file",
                },
            },
            required=["path", "content"],
        )

        list_files_tool = Tool(
            name="list_files",
            description=(
                "List files and directories in the sandbox filesystem. "
                "Useful for exploring the sandbox environment and finding files. "
                "Optionally filter by glob pattern (e.g., '*.py', '**/*.txt')."
            ),
            run=self._list_files,
            parameters={
                "path": {
                    "type": "string",
                    "description": "Directory path to list (default: current directory)",
                },
                "pattern": {
                    "type": "string",
                    "description": "Glob pattern to filter files (e.g., '*.py', '**/*.txt')",
                },
            },
            required=[],
        )

        preview_tool = Tool(
            name="get_preview_link",
            description=(
                "Get a public URL to access a port in the sandbox. "
                "Useful for exposing web servers or applications running in the sandbox. "
                "Returns a URL and authentication token if needed."
            ),
            run=self._get_preview_link,
            parameters={
                "port": {
                    "type": "integer",
                    "description": "Port number to expose (default: 8080)",
                },
            },
            required=[],
        )

        workdir_tool = Tool(
            name="get_working_directory",
            description=(
                "Get the current working directory path in the sandbox. "
                "Useful for understanding the sandbox environment layout."
            ),
            run=self._get_working_dir,
            parameters={},
            required=[],
        )

        return [
            bash_tool,
            read_file_tool,
            write_file_tool,
            list_files_tool,
            preview_tool,
            workdir_tool,
        ]
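For context, a minimal usage sketch of the new DaytonaSandbox module added in this release. This is not taken from the package's documentation: it assumes the daytona_sdk dependency is installed and the DAYTONA_* environment variables described in the __init__ docstring are set, and it calls the private _write_file/_exec/_list_files methods directly only to illustrate what the generated tools wrap. Wiring the get_tools() result into an lm-deluge client's tool-calling loop is the intended use but is outside this file.

import asyncio

from lm_deluge.tool.prefab.sandbox.daytona_sandbox import DaytonaSandbox


async def main() -> None:
    # 'async with' creates the sandbox lazily and guarantees _destroy() on exit.
    # stateful=True keeps shell variables and cwd between bash commands.
    async with DaytonaSandbox(stateful=True) as sandbox:
        # Tool objects (bash, read_file, write_file, list_files, ...) meant to be
        # handed to an LLM tool-calling loop.
        tools = sandbox.get_tools()
        print(f"{len(tools)} tools available")

        # The same operations can be driven directly for a quick smoke test.
        await sandbox._write_file("/tmp/hello.py", "print('hi from daytona')")
        print(await sandbox._exec("python /tmp/hello.py"))
        print(await sandbox._list_files("/tmp", pattern="*.py"))


asyncio.run(main())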