indent 0.1.26__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- exponent/__init__.py +34 -0
- exponent/cli.py +110 -0
- exponent/commands/cloud_commands.py +585 -0
- exponent/commands/common.py +411 -0
- exponent/commands/config_commands.py +334 -0
- exponent/commands/run_commands.py +222 -0
- exponent/commands/settings.py +56 -0
- exponent/commands/types.py +111 -0
- exponent/commands/upgrade.py +29 -0
- exponent/commands/utils.py +146 -0
- exponent/core/config.py +180 -0
- exponent/core/graphql/__init__.py +0 -0
- exponent/core/graphql/client.py +61 -0
- exponent/core/graphql/get_chats_query.py +47 -0
- exponent/core/graphql/mutations.py +160 -0
- exponent/core/graphql/queries.py +146 -0
- exponent/core/graphql/subscriptions.py +16 -0
- exponent/core/remote_execution/checkpoints.py +212 -0
- exponent/core/remote_execution/cli_rpc_types.py +499 -0
- exponent/core/remote_execution/client.py +999 -0
- exponent/core/remote_execution/code_execution.py +77 -0
- exponent/core/remote_execution/default_env.py +31 -0
- exponent/core/remote_execution/error_info.py +45 -0
- exponent/core/remote_execution/exceptions.py +10 -0
- exponent/core/remote_execution/file_write.py +35 -0
- exponent/core/remote_execution/files.py +330 -0
- exponent/core/remote_execution/git.py +268 -0
- exponent/core/remote_execution/http_fetch.py +94 -0
- exponent/core/remote_execution/languages/python_execution.py +239 -0
- exponent/core/remote_execution/languages/shell_streaming.py +226 -0
- exponent/core/remote_execution/languages/types.py +20 -0
- exponent/core/remote_execution/port_utils.py +73 -0
- exponent/core/remote_execution/session.py +128 -0
- exponent/core/remote_execution/system_context.py +26 -0
- exponent/core/remote_execution/terminal_session.py +375 -0
- exponent/core/remote_execution/terminal_types.py +29 -0
- exponent/core/remote_execution/tool_execution.py +595 -0
- exponent/core/remote_execution/tool_type_utils.py +39 -0
- exponent/core/remote_execution/truncation.py +296 -0
- exponent/core/remote_execution/types.py +635 -0
- exponent/core/remote_execution/utils.py +477 -0
- exponent/core/types/__init__.py +0 -0
- exponent/core/types/command_data.py +206 -0
- exponent/core/types/event_types.py +89 -0
- exponent/core/types/generated/__init__.py +0 -0
- exponent/core/types/generated/strategy_info.py +213 -0
- exponent/migration-docs/login.md +112 -0
- exponent/py.typed +4 -0
- exponent/utils/__init__.py +0 -0
- exponent/utils/colors.py +92 -0
- exponent/utils/version.py +289 -0
- indent-0.1.26.dist-info/METADATA +38 -0
- indent-0.1.26.dist-info/RECORD +55 -0
- indent-0.1.26.dist-info/WHEEL +4 -0
- indent-0.1.26.dist-info/entry_points.txt +2 -0
|
@@ -0,0 +1,595 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
import logging
|
|
3
|
+
import uuid
|
|
4
|
+
from collections.abc import Callable
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from time import time
|
|
7
|
+
from typing import TYPE_CHECKING
|
|
8
|
+
|
|
9
|
+
from anyio import Path as AsyncPath
|
|
10
|
+
|
|
11
|
+
from exponent.core.remote_execution import files
|
|
12
|
+
from exponent.core.remote_execution.cli_rpc_types import (
|
|
13
|
+
BashToolInput,
|
|
14
|
+
BashToolResult,
|
|
15
|
+
DownloadArtifactToolInput,
|
|
16
|
+
DownloadArtifactToolResult,
|
|
17
|
+
EditToolInput,
|
|
18
|
+
EditToolResult,
|
|
19
|
+
ErrorToolResult,
|
|
20
|
+
GlobToolInput,
|
|
21
|
+
GlobToolResult,
|
|
22
|
+
GrepToolInput,
|
|
23
|
+
GrepToolResult,
|
|
24
|
+
ListToolInput,
|
|
25
|
+
ListToolResult,
|
|
26
|
+
ReadToolArtifactResult,
|
|
27
|
+
ReadToolInput,
|
|
28
|
+
ReadToolResult,
|
|
29
|
+
ToolInputType,
|
|
30
|
+
ToolResultType,
|
|
31
|
+
UploadArtifactToolInput,
|
|
32
|
+
UploadArtifactToolResult,
|
|
33
|
+
WriteToolInput,
|
|
34
|
+
WriteToolResult,
|
|
35
|
+
)
|
|
36
|
+
|
|
37
|
+
if TYPE_CHECKING:
|
|
38
|
+
from exponent.core.remote_execution.client import RemoteExecutionClient
|
|
39
|
+
import urllib.request
|
|
40
|
+
|
|
41
|
+
import aiohttp
|
|
42
|
+
|
|
43
|
+
from exponent.core.remote_execution.cli_rpc_types import (
|
|
44
|
+
StreamingCodeExecutionRequest,
|
|
45
|
+
StreamingCodeExecutionResponse,
|
|
46
|
+
)
|
|
47
|
+
from exponent.core.remote_execution.code_execution import (
|
|
48
|
+
execute_code_streaming,
|
|
49
|
+
)
|
|
50
|
+
from exponent.core.remote_execution.file_write import execute_full_file_rewrite
|
|
51
|
+
from exponent.core.remote_execution.truncation import truncate_tool_result
|
|
52
|
+
from exponent.core.remote_execution.utils import (
|
|
53
|
+
assert_unreachable,
|
|
54
|
+
safe_get_file_metadata,
|
|
55
|
+
safe_read_file,
|
|
56
|
+
)
|
|
57
|
+
|
|
58
|
+
logger = logging.getLogger(__name__)
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
async def execute_tool(
|
|
62
|
+
tool_input: ToolInputType,
|
|
63
|
+
working_directory: str,
|
|
64
|
+
upload_client: "RemoteExecutionClient | None" = None,
|
|
65
|
+
) -> ToolResultType:
|
|
66
|
+
if isinstance(tool_input, ReadToolInput):
|
|
67
|
+
return await execute_read_file(tool_input, working_directory, upload_client)
|
|
68
|
+
elif isinstance(tool_input, WriteToolInput):
|
|
69
|
+
return await execute_write_file(tool_input, working_directory)
|
|
70
|
+
elif isinstance(tool_input, ListToolInput):
|
|
71
|
+
return await execute_list_files(tool_input, working_directory)
|
|
72
|
+
elif isinstance(tool_input, GlobToolInput):
|
|
73
|
+
return await execute_glob_files(tool_input, working_directory)
|
|
74
|
+
elif isinstance(tool_input, GrepToolInput):
|
|
75
|
+
return await execute_grep_files(tool_input, working_directory)
|
|
76
|
+
elif isinstance(tool_input, EditToolInput):
|
|
77
|
+
return await execute_edit_file(tool_input, working_directory)
|
|
78
|
+
elif isinstance(tool_input, DownloadArtifactToolInput):
|
|
79
|
+
return await execute_download_artifact(tool_input, working_directory)
|
|
80
|
+
elif isinstance(tool_input, UploadArtifactToolInput):
|
|
81
|
+
return await execute_upload_artifact(tool_input, working_directory)
|
|
82
|
+
elif isinstance(tool_input, BashToolInput):
|
|
83
|
+
raise ValueError("Bash tool input should be handled by execute_bash_tool")
|
|
84
|
+
else:
|
|
85
|
+
assert_unreachable(tool_input)
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
def truncate_result[T: ToolResultType](tool_result: T) -> T:
|
|
89
|
+
return truncate_tool_result(tool_result)
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
def is_image_file(file_path: str) -> tuple[bool, str | None]:
|
|
93
|
+
ext = Path(file_path).suffix.lower()
|
|
94
|
+
if ext == ".png":
|
|
95
|
+
return (True, "image/png")
|
|
96
|
+
elif ext in [".jpg", ".jpeg"]:
|
|
97
|
+
return (True, "image/jpeg")
|
|
98
|
+
return (False, None)
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
async def execute_read_file(  # noqa: PLR0911, PLR0915
    tool_input: ReadToolInput,
    working_directory: str,
    upload_client: "RemoteExecutionClient | None" = None,
) -> ReadToolResult | ErrorToolResult:
    """Read a text file (with offset/limit windowing) or upload an image.

    For ``.png``/``.jpg``/``.jpeg`` paths, when ``upload_client`` is given,
    the file bytes are PUT to a pre-signed S3 URL and an artifact reference
    is returned instead of text content. Otherwise the file is read as text,
    sliced to the requested line window, and truncated to a character limit
    at line boundaries. All failure modes return ``ErrorToolResult``.
    """
    # Validate absolute path requirement
    if not tool_input.file_path.startswith("/"):
        return ErrorToolResult(
            error_message=f"File path must be absolute, got relative path: {tool_input.file_path}"
        )

    # Validate offset and limit
    offset = tool_input.offset if tool_input.offset is not None else 0
    limit = tool_input.limit if tool_input.limit is not None else 2000

    if limit <= 0:
        return ErrorToolResult(error_message=f"Limit must be positive, got: {limit}")

    # file_path is absolute, so Path joining leaves it unchanged.
    file = AsyncPath(working_directory, tool_input.file_path)

    # Check if this is an image file and we have an upload client
    is_image, media_type = is_image_file(tool_input.file_path)
    if is_image and media_type and upload_client is not None:
        try:
            file_name = Path(tool_input.file_path).name
            # Random prefix keeps object keys unique per upload.
            s3_key = f"images/{uuid.uuid4()}/{file_name}"

            upload_response = await upload_client.request_upload_url(s3_key, media_type)

            f = await file.open("rb")
            async with f:
                file_data = await f.read()

            # Blocking urllib PUT, run off the event loop via to_thread.
            def _upload() -> int:
                req = urllib.request.Request(
                    upload_response.upload_url,
                    data=file_data,
                    headers={"Content-Type": media_type},
                    method="PUT",
                )
                with urllib.request.urlopen(req) as resp:
                    status: int = resp.status
                    return status

            status = await asyncio.to_thread(_upload)
            if status != 200:
                raise RuntimeError(f"Upload failed with status {status}")

            return ReadToolResult(
                artifact=ReadToolArtifactResult(
                    s3_uri=upload_response.s3_uri,
                    file_path=tool_input.file_path,
                    media_type=media_type,
                )
            )
        except Exception as e:
            # Any failure in the upload path (open/read/request/PUT) is
            # reported as a single tool error.
            return ErrorToolResult(error_message=f"Failed to upload image to S3: {e!s}")

    try:
        exists = await file.exists()
    except (OSError, PermissionError) as e:
        return ErrorToolResult(error_message=f"Cannot access file: {e!s}")

    if not exists:
        return ErrorToolResult(
            error_message="File not found",
        )

    try:
        if await file.is_dir():
            return ErrorToolResult(
                error_message=f"{await file.absolute()} is a directory",
            )
    except (OSError, PermissionError) as e:
        return ErrorToolResult(error_message=f"Cannot check file type: {e!s}")

    try:
        content = await safe_read_file(file)
    except PermissionError:
        return ErrorToolResult(
            error_message=f"Permission denied: cannot read {tool_input.file_path}"
        )
    except UnicodeDecodeError:
        return ErrorToolResult(
            error_message="File appears to be binary or has invalid text encoding"
        )
    except Exception as e:
        return ErrorToolResult(error_message=f"Error reading file: {e!s}")

    metadata = await safe_get_file_metadata(file)

    # Handle empty files
    if not content:
        return ReadToolResult(
            content="",
            num_lines=0,
            start_line=0,
            total_lines=0,
            metadata=metadata,
        )

    # keepends=True so rejoining the slice reproduces exact file bytes.
    content_lines = content.splitlines(keepends=True)
    total_lines = len(content_lines)

    # Handle offset beyond file length for positive offsets
    if offset >= 0 and offset >= total_lines:
        return ReadToolResult(
            content="",
            num_lines=0,
            start_line=offset,
            total_lines=total_lines,
            metadata=metadata,
        )

    # Use Python's native slicing - it handles negative offsets naturally
    # Handle the case where offset + limit < 0 (can't mix negative and non-negative indices)
    if offset < 0 and offset + limit < 0:
        # Both start and end are negative, use negative end index
        end_index = offset + limit
    elif offset < 0 and offset + limit >= 0:
        # Start is negative but end would be positive/zero, slice to end
        end_index = None
    else:
        # Normal case: both indices are non-negative
        end_index = offset + limit

    content_lines = content_lines[offset:end_index]

    # Calculate the actual start line for the result
    if offset < 0:
        # For negative offsets, calculate where we actually started
        actual_start_line = max(0, total_lines + offset)
    else:
        actual_start_line = offset

    # Apply character-level truncation at line boundaries to ensure consistency
    # This ensures the content field and num_lines field remain in sync
    CHARACTER_LIMIT = 90_000  # Match the limit in truncation.py

    # Join lines and check total size
    final_content = "".join(content_lines)

    if len(final_content) > CHARACTER_LIMIT:
        # Truncate at line boundaries to stay under the limit
        truncated_lines: list[str] = []
        current_size = 0
        truncation_message = "\n[Content truncated due to size limit]"
        truncation_size = len(truncation_message)
        lines_included = 0

        for line in content_lines:
            # Check if adding this line would exceed the limit (accounting for truncation message)
            if current_size + len(line) + truncation_size > CHARACTER_LIMIT:
                final_content = "".join(truncated_lines) + truncation_message
                break
            truncated_lines.append(line)
            current_size += len(line)
            lines_included += 1
        else:
            # All lines fit (shouldn't happen if we got here, but be safe)
            final_content = "".join(truncated_lines)
            lines_included = len(content_lines)

        # num_lines reflects only lines actually included, not the marker.
        num_lines = lines_included
    else:
        num_lines = len(content_lines)

    return ReadToolResult(
        content=final_content,
        num_lines=num_lines,
        start_line=actual_start_line,
        total_lines=total_lines,
        metadata=metadata,
    )
|
|
275
|
+
|
|
276
|
+
|
|
277
|
+
async def execute_write_file(
    tool_input: WriteToolInput, working_directory: str
) -> WriteToolResult:
    """Write ``tool_input.content`` to the target path via full-file rewrite.

    ``Path(working_directory, p)`` resolves to ``p`` unchanged when ``p``
    is absolute, so absolute tool paths are used as-is.
    """
    target = Path(working_directory, tool_input.file_path)
    message = await execute_full_file_rewrite(
        target, tool_input.content, working_directory
    )
    return WriteToolResult(message=message)
|
|
286
|
+
|
|
287
|
+
|
|
288
|
+
async def execute_edit_file(  # noqa: PLR0911
    tool_input: EditToolInput, working_directory: str
) -> EditToolResult | ErrorToolResult:
    """Replace ``old_string`` with ``new_string`` in the target file.

    Validation order: absolute path, existence, optional stale-timestamp
    guard, not-a-directory, readable text, search-text present, strings
    differ, and (unless ``replace_all``) uniqueness of the match. The new
    content is written back via the same full-file-rewrite path the write
    tool uses. All failures return ``ErrorToolResult``.
    """
    # Validate absolute path requirement
    if not tool_input.file_path.startswith("/"):
        return ErrorToolResult(
            error_message=f"File path must be absolute, got relative path: {tool_input.file_path}"
        )

    # file_path is absolute, so the working_directory component is inert.
    file = AsyncPath(working_directory, tool_input.file_path)

    try:
        exists = await file.exists()
    except (OSError, PermissionError) as e:
        return ErrorToolResult(error_message=f"Cannot access file: {e!s}")

    if not exists:
        return ErrorToolResult(error_message="File not found")

    # Optimistic-concurrency guard: refuse to edit if the file changed
    # after the caller last observed it.
    if tool_input.last_known_modified_timestamp is not None:
        metadata = await safe_get_file_metadata(file)
        if (
            metadata is not None
            and metadata.modified_timestamp > tool_input.last_known_modified_timestamp
        ):
            return ErrorToolResult(
                error_message="File has been modified since last read/write"
            )

    try:
        if await file.is_dir():
            return ErrorToolResult(
                error_message=f"{await file.absolute()} is a directory"
            )
    except (OSError, PermissionError) as e:
        return ErrorToolResult(error_message=f"Cannot check file type: {e!s}")

    try:
        # Read the entire file without truncation limits
        content = await safe_read_file(file)
    except PermissionError:
        return ErrorToolResult(
            error_message=f"Permission denied: cannot read {tool_input.file_path}"
        )
    except UnicodeDecodeError:
        return ErrorToolResult(
            error_message="File appears to be binary or has invalid text encoding"
        )
    except Exception as e:
        return ErrorToolResult(error_message=f"Error reading file: {e!s}")

    # Check if search text exists
    if tool_input.old_string not in content:
        return ErrorToolResult(
            error_message=f"Search text not found in {tool_input.file_path}"
        )

    # Check if old_string and new_string are identical
    if tool_input.old_string == tool_input.new_string:
        return ErrorToolResult(error_message="Old string and new string are identical")

    # Check uniqueness if replace_all is False
    if not tool_input.replace_all:
        occurrences = content.count(tool_input.old_string)
        if occurrences > 1:
            return ErrorToolResult(
                error_message=f"String '{tool_input.old_string}' appears {occurrences} times in file. Use a larger context or replace_all=True"
            )

    # Perform replacement
    if tool_input.replace_all:
        new_content = content.replace(tool_input.old_string, tool_input.new_string)
    else:
        # Replace only the first occurrence
        new_content = content.replace(tool_input.old_string, tool_input.new_string, 1)

    # Write back to file
    try:
        path = Path(working_directory, tool_input.file_path)
        await execute_full_file_rewrite(path, new_content, working_directory)
        return EditToolResult(
            message=f"Successfully replaced text in {tool_input.file_path}",
            # Fresh metadata lets the caller update its last-known timestamp.
            metadata=await safe_get_file_metadata(path),
        )
    except Exception as e:
        return ErrorToolResult(error_message=f"Error writing file: {e!s}")
|
|
374
|
+
|
|
375
|
+
|
|
376
|
+
async def execute_list_files(
    tool_input: ListToolInput, working_directory: str
) -> ListToolResult | ErrorToolResult:
    """List the immediate entries of the directory named by ``tool_input.path``.

    Returns ``ErrorToolResult`` for a missing path, a non-directory path, or
    any OS/permission failure while probing or iterating the directory.
    NOTE(review): ``working_directory`` is not consulted here — the tool path
    is used as given; confirm callers always send an absolute path.
    """
    path = AsyncPath(tool_input.path)

    try:
        exists = await path.exists()
    except (OSError, PermissionError) as e:
        return ErrorToolResult(error_message=f"Cannot access path: {e!s}")

    if not exists:
        return ErrorToolResult(error_message=f"Directory not found: {tool_input.path}")

    try:
        is_dir = await path.is_dir()
    except (OSError, PermissionError) as e:
        return ErrorToolResult(
            error_message=f"Cannot check if path is directory: {e!s}"
        )

    if not is_dir:
        return ErrorToolResult(
            error_message=f"Path is not a directory: {tool_input.path}"
        )

    try:
        filenames = [entry.name async for entry in path.iterdir()]
    except (OSError, PermissionError) as e:
        return ErrorToolResult(error_message=f"Cannot list directory contents: {e!s}")

    # filenames is already list[str]; the previous identity comprehension
    # copied it element-by-element for no benefit.
    return ListToolResult(files=filenames)
|
|
409
|
+
|
|
410
|
+
|
|
411
|
+
async def execute_glob_files(
    tool_input: GlobToolInput, working_directory: str
) -> GlobToolResult:
    """Run a glob search and report matches plus timing and truncation info.

    Falls back to ``working_directory`` when the input carries no path; the
    result is flagged truncated when the match count hits the glob cap.
    """
    search_root = tool_input.path if tool_input.path is not None else working_directory
    started_at = time()
    matches = await files.glob(
        path=search_root,
        glob_pattern=tool_input.pattern,
    )
    elapsed_ms = int((time() - started_at) * 1000)
    return GlobToolResult(
        filenames=matches,
        duration_ms=elapsed_ms,
        num_files=len(matches),
        truncated=len(matches) >= files.GLOB_MAX_COUNT,
    )
|
|
427
|
+
|
|
428
|
+
|
|
429
|
+
async def execute_grep_files(
    tool_input: GrepToolInput, working_directory: str
) -> GrepToolResult | ErrorToolResult:
    """Delegate a regex content search to ``files.search_files``.

    Uses ``working_directory`` as the search root when the input carries no
    explicit path; result/error shaping is handled by the helper.
    """
    search_root = tool_input.path if tool_input.path is not None else working_directory
    return await files.search_files(
        path_str=search_root,
        file_pattern=tool_input.include,
        regex=tool_input.pattern,
        working_directory=working_directory,
        multiline=tool_input.multiline,
    )
|
|
439
|
+
|
|
440
|
+
|
|
441
|
+
async def execute_bash_tool(
    tool_input: BashToolInput, working_directory: str, should_halt: Callable[[], bool]
) -> BashToolResult:
    """Run a shell command via the streaming execution path and return the
    final aggregated result.

    The streaming generator is drained without forwarding intermediate
    chunks; only the last yielded response (the terminal one) is used.
    Default timeout is 120s when the input does not specify one.
    """
    start_time = time()
    result = None
    async for result in execute_code_streaming(
        StreamingCodeExecutionRequest(
            language="shell",
            content=tool_input.command,
            timeout=120 if tool_input.timeout is None else tool_input.timeout,
            correlation_id=str(uuid.uuid4()),
        ),
        working_directory=working_directory,
        # NOTE(review): a None session is passed despite the annotation —
        # presumably execute_code_streaming tolerates it for shell; confirm.
        session=None,  # type: ignore
        should_halt=should_halt,
    ):
        pass

    # The generator must yield at least one terminal response.
    assert isinstance(result, StreamingCodeExecutionResponse)

    return BashToolResult(
        shell_output=result.content,
        exit_code=result.exit_code,
        duration_ms=int((time() - start_time) * 1000),
        timed_out=result.cancelled_for_timeout,
        stopped_by_user=result.halted,
    )
|
|
468
|
+
|
|
469
|
+
|
|
470
|
+
async def execute_download_artifact(
    tool_input: DownloadArtifactToolInput, working_directory: str
) -> DownloadArtifactToolResult | ErrorToolResult:
    """Download an artifact from S3 using a pre-signed URL."""

    # Validate absolute path
    if not tool_input.file_path.startswith("/"):
        return ErrorToolResult(
            error_message=f"File path must be absolute, got relative path: {tool_input.file_path}"
        )

    # Check if file exists and overwrite is False
    file_path = Path(tool_input.file_path)
    if file_path.exists() and not tool_input.overwrite:
        return ErrorToolResult(
            error_message=f"File already exists: {tool_input.file_path}. Set overwrite=True to replace it."
        )

    try:
        # Download from pre-signed URL
        async with aiohttp.ClientSession() as session:
            async with session.get(tool_input.presigned_url) as response:
                if response.status != 200:
                    error_text = await response.text()
                    return ErrorToolResult(
                        error_message=f"Failed to download artifact: HTTP {response.status} - {error_text}"
                    )

                # Create parent directory if needed
                file_path.parent.mkdir(parents=True, exist_ok=True)

                # Write file
                # NOTE(review): read() buffers the whole body in memory and
                # write_bytes blocks the event loop briefly; acceptable for
                # typical artifact sizes.
                content = await response.read()
                file_path.write_bytes(content)

                file_size = len(content)

                # Try to generate content preview for text files
                # Attempt to decode as UTF-8 to determine if it's a text file
                content_preview = None
                num_lines = None
                total_lines = None
                truncated = False

                try:
                    text_content = content.decode("utf-8")
                    lines = text_content.splitlines()
                    total_lines = len(lines)

                    # Show first 50 lines
                    preview_limit = 50
                    if len(lines) > preview_limit:
                        preview_lines = lines[:preview_limit]
                        truncated = True
                        num_lines = preview_limit
                    else:
                        preview_lines = lines
                        num_lines = len(lines)

                    content_preview = "\n".join(preview_lines)
                except UnicodeDecodeError:
                    # Binary file, skip preview
                    pass

                return DownloadArtifactToolResult(
                    file_path=tool_input.file_path,
                    artifact_id=tool_input.artifact_id,
                    file_size_bytes=file_size,
                    content_preview=content_preview,
                    num_lines=num_lines,
                    total_lines=total_lines,
                    truncated=truncated,
                )

    except Exception as e:
        # Network, filesystem, or decode failures all collapse to one error.
        logger.exception("Failed to download artifact")
        return ErrorToolResult(error_message=f"Failed to download artifact: {e!s}")
|
|
547
|
+
|
|
548
|
+
|
|
549
|
+
async def execute_upload_artifact(
    tool_input: UploadArtifactToolInput, working_directory: str
) -> UploadArtifactToolResult | ErrorToolResult:
    """Upload an artifact to S3 using a pre-signed URL."""

    # Validate absolute path
    if not tool_input.file_path.startswith("/"):
        return ErrorToolResult(
            error_message=f"File path must be absolute, got relative path: {tool_input.file_path}"
        )

    # Check if file exists
    file_path = Path(tool_input.file_path)
    if not file_path.exists():
        return ErrorToolResult(error_message=f"File not found: {tool_input.file_path}")

    if not file_path.is_file():
        return ErrorToolResult(
            error_message=f"Path is not a file: {tool_input.file_path}"
        )

    try:
        # Read file
        # NOTE(review): whole file buffered in memory before the PUT;
        # acceptable for typical artifact sizes.
        content = file_path.read_bytes()
        file_size = len(content)

        # Upload to pre-signed URL
        async with aiohttp.ClientSession() as session:
            headers = {"Content-Type": tool_input.content_type}
            async with session.put(
                tool_input.presigned_url, data=content, headers=headers
            ) as response:
                # S3 pre-signed PUTs answer 200; some backends use 204.
                if response.status not in (200, 204):
                    error_text = await response.text()
                    return ErrorToolResult(
                        error_message=f"Failed to upload artifact: HTTP {response.status} - {error_text}"
                    )

        return UploadArtifactToolResult(
            artifact_id=tool_input.artifact_id,
            file_size_bytes=file_size,
            content_type=tool_input.content_type,
        )

    except Exception as e:
        # Filesystem or network failures collapse to a single tool error.
        logger.exception("Failed to upload artifact")
        return ErrorToolResult(error_message=f"Failed to upload artifact: {e!s}")
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
from typing import Any
|
|
2
|
+
|
|
3
|
+
import msgspec
|
|
4
|
+
|
|
5
|
+
from exponent.core.remote_execution.cli_rpc_types import ToolResult
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def to_mostly_xml(tool_result: ToolResult) -> str:
    """Render a tool result as a default XML-ish textual representation.

    The ``tool_name`` field is dropped before rendering. Override this per
    tool when a custom representation is needed.
    """
    payload = msgspec.to_builtins(tool_result)
    payload.pop("tool_name")
    return to_mostly_xml_helper(payload)
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def to_mostly_xml_helper(
    d: Any,
) -> str:
    """Recursively render builtins as loose XML-like text (no escaping).

    Dicts become ``<key>…</key>`` pairs joined by newlines with no outer
    wrapper; list values wrap each element in ``<item>`` tags; scalars are
    stringified. A list at the top level is rejected.
    """
    if isinstance(d, list):
        raise ValueError("Lists are not allowed at the top level")
    if not isinstance(d, dict):
        return str(d)

    def render_value(value: Any) -> str:
        # Lists get per-element <item> wrappers; dicts recurse; scalars
        # are stringified.
        if isinstance(value, list):
            return "\n".join(
                f"<item>\n{to_mostly_xml_helper(entry)}\n</item>" for entry in value
            )
        if isinstance(value, dict):
            return to_mostly_xml_helper(value)
        return str(value)

    return "\n".join(
        f"<{key}>\n{render_value(value)}\n</{key}>" for key, value in d.items()
    )
|