deepagents 0.3.8__py3-none-any.whl → 0.3.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deepagents/__init__.py +3 -1
- deepagents/_version.py +3 -0
- deepagents/backends/__init__.py +2 -0
- deepagents/backends/composite.py +2 -2
- deepagents/backends/filesystem.py +13 -21
- deepagents/backends/local_shell.py +305 -0
- deepagents/backends/sandbox.py +431 -24
- deepagents/backends/utils.py +69 -24
- deepagents/middleware/filesystem.py +482 -522
- deepagents/middleware/skills.py +1 -1
- deepagents/middleware/subagents.py +23 -9
- deepagents/middleware/summarization.py +9 -4
- deepagents/py.typed +0 -0
- deepagents-0.3.10.dist-info/METADATA +76 -0
- deepagents-0.3.10.dist-info/RECORD +25 -0
- {deepagents-0.3.8.dist-info → deepagents-0.3.10.dist-info}/WHEEL +1 -1
- deepagents-0.3.8.dist-info/METADATA +0 -527
- deepagents-0.3.8.dist-info/RECORD +0 -22
- {deepagents-0.3.8.dist-info → deepagents-0.3.10.dist-info}/top_level.txt +0 -0
|
@@ -41,6 +41,21 @@ LINE_NUMBER_WIDTH = 6
|
|
|
41
41
|
DEFAULT_READ_OFFSET = 0
|
|
42
42
|
DEFAULT_READ_LIMIT = 100
|
|
43
43
|
|
|
44
|
+
# Template for truncation message in read_file
|
|
45
|
+
# {file_path} will be filled in at runtime
|
|
46
|
+
READ_FILE_TRUNCATION_MSG = (
|
|
47
|
+
"\n\n[Output was truncated due to size limits. "
|
|
48
|
+
"The file content is very large. "
|
|
49
|
+
"Consider reformatting the file to make it easier to navigate. "
|
|
50
|
+
"For example, if this is JSON, use execute(command='jq . {file_path}') to pretty-print it with line breaks. "
|
|
51
|
+
"For other formats, you can use appropriate formatting tools to split long lines.]"
|
|
52
|
+
)
|
|
53
|
+
|
|
54
|
+
# Approximate number of characters per token for truncation calculations.
|
|
55
|
+
# Using 4 chars per token as a conservative approximation (actual ratio varies by content)
|
|
56
|
+
# This errs on the high side to avoid premature eviction of content that might fit
|
|
57
|
+
NUM_CHARS_PER_TOKEN = 4
|
|
58
|
+
|
|
44
59
|
|
|
45
60
|
class FileData(TypedDict):
|
|
46
61
|
"""Data structure for storing file contents with metadata."""
|
|
@@ -206,11 +221,13 @@ Examples:
|
|
|
206
221
|
GREP_TOOL_DESCRIPTION = """Search for a text pattern across files.
|
|
207
222
|
|
|
208
223
|
Searches for literal text (not regex) and returns matching files or content based on output_mode.
|
|
224
|
+
Special characters like parentheses, brackets, pipes, etc. are treated as literal characters, not regex operators.
|
|
209
225
|
|
|
210
226
|
Examples:
|
|
211
227
|
- Search all files: `grep(pattern="TODO")`
|
|
212
228
|
- Search Python files only: `grep(pattern="import", glob="*.py")`
|
|
213
|
-
- Show matching lines: `grep(pattern="error", output_mode="content")`
|
|
229
|
+
- Show matching lines: `grep(pattern="error", output_mode="content")`
|
|
230
|
+
- Search for code with special chars: `grep(pattern="def __init__(self):")`"""
|
|
214
231
|
|
|
215
232
|
EXECUTE_TOOL_DESCRIPTION = """Executes a shell command in an isolated sandbox environment.
|
|
216
233
|
|
|
@@ -274,387 +291,6 @@ Use this tool to run commands, scripts, tests, builds, and other shell operation
|
|
|
274
291
|
- execute: run a shell command in the sandbox (returns output and exit code)"""
|
|
275
292
|
|
|
276
293
|
|
|
277
|
-
def _get_backend(backend: BACKEND_TYPES, runtime: ToolRuntime) -> BackendProtocol:
|
|
278
|
-
"""Get the resolved backend instance from backend or factory.
|
|
279
|
-
|
|
280
|
-
Args:
|
|
281
|
-
backend: Backend instance or factory function.
|
|
282
|
-
runtime: The tool runtime context.
|
|
283
|
-
|
|
284
|
-
Returns:
|
|
285
|
-
Resolved backend instance.
|
|
286
|
-
"""
|
|
287
|
-
if callable(backend):
|
|
288
|
-
return backend(runtime)
|
|
289
|
-
return backend
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
def _ls_tool_generator(
|
|
293
|
-
backend: BackendProtocol | Callable[[ToolRuntime], BackendProtocol],
|
|
294
|
-
custom_description: str | None = None,
|
|
295
|
-
) -> BaseTool:
|
|
296
|
-
"""Generate the ls (list files) tool.
|
|
297
|
-
|
|
298
|
-
Args:
|
|
299
|
-
backend: Backend to use for file storage, or a factory function that takes runtime and returns a backend.
|
|
300
|
-
custom_description: Optional custom description for the tool.
|
|
301
|
-
|
|
302
|
-
Returns:
|
|
303
|
-
Configured ls tool that lists files using the backend.
|
|
304
|
-
"""
|
|
305
|
-
tool_description = custom_description or LIST_FILES_TOOL_DESCRIPTION
|
|
306
|
-
|
|
307
|
-
def sync_ls(
|
|
308
|
-
runtime: ToolRuntime[None, FilesystemState],
|
|
309
|
-
path: Annotated[str, "Absolute path to the directory to list. Must be absolute, not relative."],
|
|
310
|
-
) -> str:
|
|
311
|
-
"""Synchronous wrapper for ls tool."""
|
|
312
|
-
resolved_backend = _get_backend(backend, runtime)
|
|
313
|
-
validated_path = _validate_path(path)
|
|
314
|
-
infos = resolved_backend.ls_info(validated_path)
|
|
315
|
-
paths = [fi.get("path", "") for fi in infos]
|
|
316
|
-
result = truncate_if_too_long(paths)
|
|
317
|
-
return str(result)
|
|
318
|
-
|
|
319
|
-
async def async_ls(
|
|
320
|
-
runtime: ToolRuntime[None, FilesystemState],
|
|
321
|
-
path: Annotated[str, "Absolute path to the directory to list. Must be absolute, not relative."],
|
|
322
|
-
) -> str:
|
|
323
|
-
"""Asynchronous wrapper for ls tool."""
|
|
324
|
-
resolved_backend = _get_backend(backend, runtime)
|
|
325
|
-
validated_path = _validate_path(path)
|
|
326
|
-
infos = await resolved_backend.als_info(validated_path)
|
|
327
|
-
paths = [fi.get("path", "") for fi in infos]
|
|
328
|
-
result = truncate_if_too_long(paths)
|
|
329
|
-
return str(result)
|
|
330
|
-
|
|
331
|
-
return StructuredTool.from_function(
|
|
332
|
-
name="ls",
|
|
333
|
-
description=tool_description,
|
|
334
|
-
func=sync_ls,
|
|
335
|
-
coroutine=async_ls,
|
|
336
|
-
)
|
|
337
|
-
|
|
338
|
-
|
|
339
|
-
def _read_file_tool_generator(
|
|
340
|
-
backend: BackendProtocol | Callable[[ToolRuntime], BackendProtocol],
|
|
341
|
-
custom_description: str | None = None,
|
|
342
|
-
) -> BaseTool:
|
|
343
|
-
"""Generate the read_file tool.
|
|
344
|
-
|
|
345
|
-
Args:
|
|
346
|
-
backend: Backend to use for file storage, or a factory function that takes runtime and returns a backend.
|
|
347
|
-
custom_description: Optional custom description for the tool.
|
|
348
|
-
|
|
349
|
-
Returns:
|
|
350
|
-
Configured read_file tool that reads files using the backend.
|
|
351
|
-
"""
|
|
352
|
-
tool_description = custom_description or READ_FILE_TOOL_DESCRIPTION
|
|
353
|
-
|
|
354
|
-
def sync_read_file(
|
|
355
|
-
file_path: Annotated[str, "Absolute path to the file to read. Must be absolute, not relative."],
|
|
356
|
-
runtime: ToolRuntime[None, FilesystemState],
|
|
357
|
-
offset: Annotated[int, "Line number to start reading from (0-indexed). Use for pagination of large files."] = DEFAULT_READ_OFFSET,
|
|
358
|
-
limit: Annotated[int, "Maximum number of lines to read. Use for pagination of large files."] = DEFAULT_READ_LIMIT,
|
|
359
|
-
) -> str:
|
|
360
|
-
"""Synchronous wrapper for read_file tool."""
|
|
361
|
-
resolved_backend = _get_backend(backend, runtime)
|
|
362
|
-
file_path = _validate_path(file_path)
|
|
363
|
-
result = resolved_backend.read(file_path, offset=offset, limit=limit)
|
|
364
|
-
|
|
365
|
-
lines = result.splitlines(keepends=True)
|
|
366
|
-
if len(lines) > limit:
|
|
367
|
-
lines = lines[:limit]
|
|
368
|
-
result = "".join(lines)
|
|
369
|
-
|
|
370
|
-
return result
|
|
371
|
-
|
|
372
|
-
async def async_read_file(
|
|
373
|
-
file_path: Annotated[str, "Absolute path to the file to read. Must be absolute, not relative."],
|
|
374
|
-
runtime: ToolRuntime[None, FilesystemState],
|
|
375
|
-
offset: Annotated[int, "Line number to start reading from (0-indexed). Use for pagination of large files."] = DEFAULT_READ_OFFSET,
|
|
376
|
-
limit: Annotated[int, "Maximum number of lines to read. Use for pagination of large files."] = DEFAULT_READ_LIMIT,
|
|
377
|
-
) -> str:
|
|
378
|
-
"""Asynchronous wrapper for read_file tool."""
|
|
379
|
-
resolved_backend = _get_backend(backend, runtime)
|
|
380
|
-
file_path = _validate_path(file_path)
|
|
381
|
-
result = await resolved_backend.aread(file_path, offset=offset, limit=limit)
|
|
382
|
-
|
|
383
|
-
lines = result.splitlines(keepends=True)
|
|
384
|
-
if len(lines) > limit:
|
|
385
|
-
lines = lines[:limit]
|
|
386
|
-
result = "".join(lines)
|
|
387
|
-
|
|
388
|
-
return result
|
|
389
|
-
|
|
390
|
-
return StructuredTool.from_function(
|
|
391
|
-
name="read_file",
|
|
392
|
-
description=tool_description,
|
|
393
|
-
func=sync_read_file,
|
|
394
|
-
coroutine=async_read_file,
|
|
395
|
-
)
|
|
396
|
-
|
|
397
|
-
|
|
398
|
-
def _write_file_tool_generator(
|
|
399
|
-
backend: BackendProtocol | Callable[[ToolRuntime], BackendProtocol],
|
|
400
|
-
custom_description: str | None = None,
|
|
401
|
-
) -> BaseTool:
|
|
402
|
-
"""Generate the write_file tool.
|
|
403
|
-
|
|
404
|
-
Args:
|
|
405
|
-
backend: Backend to use for file storage, or a factory function that takes runtime and returns a backend.
|
|
406
|
-
custom_description: Optional custom description for the tool.
|
|
407
|
-
|
|
408
|
-
Returns:
|
|
409
|
-
Configured write_file tool that creates new files using the backend.
|
|
410
|
-
"""
|
|
411
|
-
tool_description = custom_description or WRITE_FILE_TOOL_DESCRIPTION
|
|
412
|
-
|
|
413
|
-
def sync_write_file(
|
|
414
|
-
file_path: Annotated[str, "Absolute path where the file should be created. Must be absolute, not relative."],
|
|
415
|
-
content: Annotated[str, "The text content to write to the file. This parameter is required."],
|
|
416
|
-
runtime: ToolRuntime[None, FilesystemState],
|
|
417
|
-
) -> Command | str:
|
|
418
|
-
"""Synchronous wrapper for write_file tool."""
|
|
419
|
-
resolved_backend = _get_backend(backend, runtime)
|
|
420
|
-
file_path = _validate_path(file_path)
|
|
421
|
-
res: WriteResult = resolved_backend.write(file_path, content)
|
|
422
|
-
if res.error:
|
|
423
|
-
return res.error
|
|
424
|
-
# If backend returns state update, wrap into Command with ToolMessage
|
|
425
|
-
if res.files_update is not None:
|
|
426
|
-
return Command(
|
|
427
|
-
update={
|
|
428
|
-
"files": res.files_update,
|
|
429
|
-
"messages": [
|
|
430
|
-
ToolMessage(
|
|
431
|
-
content=f"Updated file {res.path}",
|
|
432
|
-
tool_call_id=runtime.tool_call_id,
|
|
433
|
-
)
|
|
434
|
-
],
|
|
435
|
-
}
|
|
436
|
-
)
|
|
437
|
-
return f"Updated file {res.path}"
|
|
438
|
-
|
|
439
|
-
async def async_write_file(
|
|
440
|
-
file_path: Annotated[str, "Absolute path where the file should be created. Must be absolute, not relative."],
|
|
441
|
-
content: Annotated[str, "The text content to write to the file. This parameter is required."],
|
|
442
|
-
runtime: ToolRuntime[None, FilesystemState],
|
|
443
|
-
) -> Command | str:
|
|
444
|
-
"""Asynchronous wrapper for write_file tool."""
|
|
445
|
-
resolved_backend = _get_backend(backend, runtime)
|
|
446
|
-
file_path = _validate_path(file_path)
|
|
447
|
-
res: WriteResult = await resolved_backend.awrite(file_path, content)
|
|
448
|
-
if res.error:
|
|
449
|
-
return res.error
|
|
450
|
-
# If backend returns state update, wrap into Command with ToolMessage
|
|
451
|
-
if res.files_update is not None:
|
|
452
|
-
return Command(
|
|
453
|
-
update={
|
|
454
|
-
"files": res.files_update,
|
|
455
|
-
"messages": [
|
|
456
|
-
ToolMessage(
|
|
457
|
-
content=f"Updated file {res.path}",
|
|
458
|
-
tool_call_id=runtime.tool_call_id,
|
|
459
|
-
)
|
|
460
|
-
],
|
|
461
|
-
}
|
|
462
|
-
)
|
|
463
|
-
return f"Updated file {res.path}"
|
|
464
|
-
|
|
465
|
-
return StructuredTool.from_function(
|
|
466
|
-
name="write_file",
|
|
467
|
-
description=tool_description,
|
|
468
|
-
func=sync_write_file,
|
|
469
|
-
coroutine=async_write_file,
|
|
470
|
-
)
|
|
471
|
-
|
|
472
|
-
|
|
473
|
-
def _edit_file_tool_generator(
|
|
474
|
-
backend: BackendProtocol | Callable[[ToolRuntime], BackendProtocol],
|
|
475
|
-
custom_description: str | None = None,
|
|
476
|
-
) -> BaseTool:
|
|
477
|
-
"""Generate the edit_file tool.
|
|
478
|
-
|
|
479
|
-
Args:
|
|
480
|
-
backend: Backend to use for file storage, or a factory function that takes runtime and returns a backend.
|
|
481
|
-
custom_description: Optional custom description for the tool.
|
|
482
|
-
|
|
483
|
-
Returns:
|
|
484
|
-
Configured edit_file tool that performs string replacements in files using the backend.
|
|
485
|
-
"""
|
|
486
|
-
tool_description = custom_description or EDIT_FILE_TOOL_DESCRIPTION
|
|
487
|
-
|
|
488
|
-
def sync_edit_file(
|
|
489
|
-
file_path: Annotated[str, "Absolute path to the file to edit. Must be absolute, not relative."],
|
|
490
|
-
old_string: Annotated[str, "The exact text to find and replace. Must be unique in the file unless replace_all is True."],
|
|
491
|
-
new_string: Annotated[str, "The text to replace old_string with. Must be different from old_string."],
|
|
492
|
-
runtime: ToolRuntime[None, FilesystemState],
|
|
493
|
-
*,
|
|
494
|
-
replace_all: Annotated[bool, "If True, replace all occurrences of old_string. If False (default), old_string must be unique."] = False,
|
|
495
|
-
) -> Command | str:
|
|
496
|
-
"""Synchronous wrapper for edit_file tool."""
|
|
497
|
-
resolved_backend = _get_backend(backend, runtime)
|
|
498
|
-
file_path = _validate_path(file_path)
|
|
499
|
-
res: EditResult = resolved_backend.edit(file_path, old_string, new_string, replace_all=replace_all)
|
|
500
|
-
if res.error:
|
|
501
|
-
return res.error
|
|
502
|
-
if res.files_update is not None:
|
|
503
|
-
return Command(
|
|
504
|
-
update={
|
|
505
|
-
"files": res.files_update,
|
|
506
|
-
"messages": [
|
|
507
|
-
ToolMessage(
|
|
508
|
-
content=f"Successfully replaced {res.occurrences} instance(s) of the string in '{res.path}'",
|
|
509
|
-
tool_call_id=runtime.tool_call_id,
|
|
510
|
-
)
|
|
511
|
-
],
|
|
512
|
-
}
|
|
513
|
-
)
|
|
514
|
-
return f"Successfully replaced {res.occurrences} instance(s) of the string in '{res.path}'"
|
|
515
|
-
|
|
516
|
-
async def async_edit_file(
|
|
517
|
-
file_path: Annotated[str, "Absolute path to the file to edit. Must be absolute, not relative."],
|
|
518
|
-
old_string: Annotated[str, "The exact text to find and replace. Must be unique in the file unless replace_all is True."],
|
|
519
|
-
new_string: Annotated[str, "The text to replace old_string with. Must be different from old_string."],
|
|
520
|
-
runtime: ToolRuntime[None, FilesystemState],
|
|
521
|
-
*,
|
|
522
|
-
replace_all: Annotated[bool, "If True, replace all occurrences of old_string. If False (default), old_string must be unique."] = False,
|
|
523
|
-
) -> Command | str:
|
|
524
|
-
"""Asynchronous wrapper for edit_file tool."""
|
|
525
|
-
resolved_backend = _get_backend(backend, runtime)
|
|
526
|
-
file_path = _validate_path(file_path)
|
|
527
|
-
res: EditResult = await resolved_backend.aedit(file_path, old_string, new_string, replace_all=replace_all)
|
|
528
|
-
if res.error:
|
|
529
|
-
return res.error
|
|
530
|
-
if res.files_update is not None:
|
|
531
|
-
return Command(
|
|
532
|
-
update={
|
|
533
|
-
"files": res.files_update,
|
|
534
|
-
"messages": [
|
|
535
|
-
ToolMessage(
|
|
536
|
-
content=f"Successfully replaced {res.occurrences} instance(s) of the string in '{res.path}'",
|
|
537
|
-
tool_call_id=runtime.tool_call_id,
|
|
538
|
-
)
|
|
539
|
-
],
|
|
540
|
-
}
|
|
541
|
-
)
|
|
542
|
-
return f"Successfully replaced {res.occurrences} instance(s) of the string in '{res.path}'"
|
|
543
|
-
|
|
544
|
-
return StructuredTool.from_function(
|
|
545
|
-
name="edit_file",
|
|
546
|
-
description=tool_description,
|
|
547
|
-
func=sync_edit_file,
|
|
548
|
-
coroutine=async_edit_file,
|
|
549
|
-
)
|
|
550
|
-
|
|
551
|
-
|
|
552
|
-
def _glob_tool_generator(
|
|
553
|
-
backend: BackendProtocol | Callable[[ToolRuntime], BackendProtocol],
|
|
554
|
-
custom_description: str | None = None,
|
|
555
|
-
) -> BaseTool:
|
|
556
|
-
"""Generate the glob tool.
|
|
557
|
-
|
|
558
|
-
Args:
|
|
559
|
-
backend: Backend to use for file storage, or a factory function that takes runtime and returns a backend.
|
|
560
|
-
custom_description: Optional custom description for the tool.
|
|
561
|
-
|
|
562
|
-
Returns:
|
|
563
|
-
Configured glob tool that finds files by pattern using the backend.
|
|
564
|
-
"""
|
|
565
|
-
tool_description = custom_description or GLOB_TOOL_DESCRIPTION
|
|
566
|
-
|
|
567
|
-
def sync_glob(
|
|
568
|
-
pattern: Annotated[str, "Glob pattern to match files (e.g., '**/*.py', '*.txt', '/subdir/**/*.md')."],
|
|
569
|
-
runtime: ToolRuntime[None, FilesystemState],
|
|
570
|
-
path: Annotated[str, "Base directory to search from. Defaults to root '/'."] = "/",
|
|
571
|
-
) -> str:
|
|
572
|
-
"""Synchronous wrapper for glob tool."""
|
|
573
|
-
resolved_backend = _get_backend(backend, runtime)
|
|
574
|
-
infos = resolved_backend.glob_info(pattern, path=path)
|
|
575
|
-
paths = [fi.get("path", "") for fi in infos]
|
|
576
|
-
result = truncate_if_too_long(paths)
|
|
577
|
-
return str(result)
|
|
578
|
-
|
|
579
|
-
async def async_glob(
|
|
580
|
-
pattern: Annotated[str, "Glob pattern to match files (e.g., '**/*.py', '*.txt', '/subdir/**/*.md')."],
|
|
581
|
-
runtime: ToolRuntime[None, FilesystemState],
|
|
582
|
-
path: Annotated[str, "Base directory to search from. Defaults to root '/'."] = "/",
|
|
583
|
-
) -> str:
|
|
584
|
-
"""Asynchronous wrapper for glob tool."""
|
|
585
|
-
resolved_backend = _get_backend(backend, runtime)
|
|
586
|
-
infos = await resolved_backend.aglob_info(pattern, path=path)
|
|
587
|
-
paths = [fi.get("path", "") for fi in infos]
|
|
588
|
-
result = truncate_if_too_long(paths)
|
|
589
|
-
return str(result)
|
|
590
|
-
|
|
591
|
-
return StructuredTool.from_function(
|
|
592
|
-
name="glob",
|
|
593
|
-
description=tool_description,
|
|
594
|
-
func=sync_glob,
|
|
595
|
-
coroutine=async_glob,
|
|
596
|
-
)
|
|
597
|
-
|
|
598
|
-
|
|
599
|
-
def _grep_tool_generator(
|
|
600
|
-
backend: BackendProtocol | Callable[[ToolRuntime], BackendProtocol],
|
|
601
|
-
custom_description: str | None = None,
|
|
602
|
-
) -> BaseTool:
|
|
603
|
-
"""Generate the grep tool.
|
|
604
|
-
|
|
605
|
-
Args:
|
|
606
|
-
backend: Backend to use for file storage, or a factory function that takes runtime and returns a backend.
|
|
607
|
-
custom_description: Optional custom description for the tool.
|
|
608
|
-
|
|
609
|
-
Returns:
|
|
610
|
-
Configured grep tool that searches for patterns in files using the backend.
|
|
611
|
-
"""
|
|
612
|
-
tool_description = custom_description or GREP_TOOL_DESCRIPTION
|
|
613
|
-
|
|
614
|
-
def sync_grep(
|
|
615
|
-
pattern: Annotated[str, "Text pattern to search for (literal string, not regex)."],
|
|
616
|
-
runtime: ToolRuntime[None, FilesystemState],
|
|
617
|
-
path: Annotated[str | None, "Directory to search in. Defaults to current working directory."] = None,
|
|
618
|
-
glob: Annotated[str | None, "Glob pattern to filter which files to search (e.g., '*.py')."] = None,
|
|
619
|
-
output_mode: Annotated[
|
|
620
|
-
Literal["files_with_matches", "content", "count"],
|
|
621
|
-
"Output format: 'files_with_matches' (file paths only, default), 'content' (matching lines with context), 'count' (match counts per file).",
|
|
622
|
-
] = "files_with_matches",
|
|
623
|
-
) -> str:
|
|
624
|
-
"""Synchronous wrapper for grep tool."""
|
|
625
|
-
resolved_backend = _get_backend(backend, runtime)
|
|
626
|
-
raw = resolved_backend.grep_raw(pattern, path=path, glob=glob)
|
|
627
|
-
if isinstance(raw, str):
|
|
628
|
-
return raw
|
|
629
|
-
formatted = format_grep_matches(raw, output_mode)
|
|
630
|
-
return truncate_if_too_long(formatted) # type: ignore[arg-type]
|
|
631
|
-
|
|
632
|
-
async def async_grep(
|
|
633
|
-
pattern: Annotated[str, "Text pattern to search for (literal string, not regex)."],
|
|
634
|
-
runtime: ToolRuntime[None, FilesystemState],
|
|
635
|
-
path: Annotated[str | None, "Directory to search in. Defaults to current working directory."] = None,
|
|
636
|
-
glob: Annotated[str | None, "Glob pattern to filter which files to search (e.g., '*.py')."] = None,
|
|
637
|
-
output_mode: Annotated[
|
|
638
|
-
Literal["files_with_matches", "content", "count"],
|
|
639
|
-
"Output format: 'files_with_matches' (file paths only, default), 'content' (matching lines with context), 'count' (match counts per file).",
|
|
640
|
-
] = "files_with_matches",
|
|
641
|
-
) -> str:
|
|
642
|
-
"""Asynchronous wrapper for grep tool."""
|
|
643
|
-
resolved_backend = _get_backend(backend, runtime)
|
|
644
|
-
raw = await resolved_backend.agrep_raw(pattern, path=path, glob=glob)
|
|
645
|
-
if isinstance(raw, str):
|
|
646
|
-
return raw
|
|
647
|
-
formatted = format_grep_matches(raw, output_mode)
|
|
648
|
-
return truncate_if_too_long(formatted) # type: ignore[arg-type]
|
|
649
|
-
|
|
650
|
-
return StructuredTool.from_function(
|
|
651
|
-
name="grep",
|
|
652
|
-
description=tool_description,
|
|
653
|
-
func=sync_grep,
|
|
654
|
-
coroutine=async_grep,
|
|
655
|
-
)
|
|
656
|
-
|
|
657
|
-
|
|
658
294
|
def _supports_execution(backend: BackendProtocol) -> bool:
|
|
659
295
|
"""Check if a backend supports command execution.
|
|
660
296
|
|
|
@@ -675,95 +311,6 @@ def _supports_execution(backend: BackendProtocol) -> bool:
|
|
|
675
311
|
return isinstance(backend, SandboxBackendProtocol)
|
|
676
312
|
|
|
677
313
|
|
|
678
|
-
def _execute_tool_generator(
|
|
679
|
-
backend: BackendProtocol | Callable[[ToolRuntime], BackendProtocol],
|
|
680
|
-
custom_description: str | None = None,
|
|
681
|
-
) -> BaseTool:
|
|
682
|
-
"""Generate the execute tool for sandbox command execution.
|
|
683
|
-
|
|
684
|
-
Args:
|
|
685
|
-
backend: Backend to use for execution, or a factory function that takes runtime and returns a backend.
|
|
686
|
-
custom_description: Optional custom description for the tool.
|
|
687
|
-
|
|
688
|
-
Returns:
|
|
689
|
-
Configured execute tool that runs commands if backend supports SandboxBackendProtocol.
|
|
690
|
-
"""
|
|
691
|
-
tool_description = custom_description or EXECUTE_TOOL_DESCRIPTION
|
|
692
|
-
|
|
693
|
-
def sync_execute(
|
|
694
|
-
command: Annotated[str, "Shell command to execute in the sandbox environment."],
|
|
695
|
-
runtime: ToolRuntime[None, FilesystemState],
|
|
696
|
-
) -> str:
|
|
697
|
-
"""Synchronous wrapper for execute tool."""
|
|
698
|
-
resolved_backend = _get_backend(backend, runtime)
|
|
699
|
-
|
|
700
|
-
# Runtime check - fail gracefully if not supported
|
|
701
|
-
if not _supports_execution(resolved_backend):
|
|
702
|
-
return (
|
|
703
|
-
"Error: Execution not available. This agent's backend "
|
|
704
|
-
"does not support command execution (SandboxBackendProtocol). "
|
|
705
|
-
"To use the execute tool, provide a backend that implements SandboxBackendProtocol."
|
|
706
|
-
)
|
|
707
|
-
|
|
708
|
-
try:
|
|
709
|
-
result = resolved_backend.execute(command)
|
|
710
|
-
except NotImplementedError as e:
|
|
711
|
-
# Handle case where execute() exists but raises NotImplementedError
|
|
712
|
-
return f"Error: Execution not available. {e}"
|
|
713
|
-
|
|
714
|
-
# Format output for LLM consumption
|
|
715
|
-
parts = [result.output]
|
|
716
|
-
|
|
717
|
-
if result.exit_code is not None:
|
|
718
|
-
status = "succeeded" if result.exit_code == 0 else "failed"
|
|
719
|
-
parts.append(f"\n[Command {status} with exit code {result.exit_code}]")
|
|
720
|
-
|
|
721
|
-
if result.truncated:
|
|
722
|
-
parts.append("\n[Output was truncated due to size limits]")
|
|
723
|
-
|
|
724
|
-
return "".join(parts)
|
|
725
|
-
|
|
726
|
-
async def async_execute(
|
|
727
|
-
command: Annotated[str, "Shell command to execute in the sandbox environment."],
|
|
728
|
-
runtime: ToolRuntime[None, FilesystemState],
|
|
729
|
-
) -> str:
|
|
730
|
-
"""Asynchronous wrapper for execute tool."""
|
|
731
|
-
resolved_backend = _get_backend(backend, runtime)
|
|
732
|
-
|
|
733
|
-
# Runtime check - fail gracefully if not supported
|
|
734
|
-
if not _supports_execution(resolved_backend):
|
|
735
|
-
return (
|
|
736
|
-
"Error: Execution not available. This agent's backend "
|
|
737
|
-
"does not support command execution (SandboxBackendProtocol). "
|
|
738
|
-
"To use the execute tool, provide a backend that implements SandboxBackendProtocol."
|
|
739
|
-
)
|
|
740
|
-
|
|
741
|
-
try:
|
|
742
|
-
result = await resolved_backend.aexecute(command)
|
|
743
|
-
except NotImplementedError as e:
|
|
744
|
-
# Handle case where execute() exists but raises NotImplementedError
|
|
745
|
-
return f"Error: Execution not available. {e}"
|
|
746
|
-
|
|
747
|
-
# Format output for LLM consumption
|
|
748
|
-
parts = [result.output]
|
|
749
|
-
|
|
750
|
-
if result.exit_code is not None:
|
|
751
|
-
status = "succeeded" if result.exit_code == 0 else "failed"
|
|
752
|
-
parts.append(f"\n[Command {status} with exit code {result.exit_code}]")
|
|
753
|
-
|
|
754
|
-
if result.truncated:
|
|
755
|
-
parts.append("\n[Output was truncated due to size limits]")
|
|
756
|
-
|
|
757
|
-
return "".join(parts)
|
|
758
|
-
|
|
759
|
-
return StructuredTool.from_function(
|
|
760
|
-
name="execute",
|
|
761
|
-
description=tool_description,
|
|
762
|
-
func=sync_execute,
|
|
763
|
-
coroutine=async_execute,
|
|
764
|
-
)
|
|
765
|
-
|
|
766
|
-
|
|
767
314
|
# Tools that should be excluded from the large result eviction logic.
|
|
768
315
|
#
|
|
769
316
|
# This tuple contains tools that should NOT have their results evicted to the filesystem
|
|
@@ -795,48 +342,46 @@ TOOLS_EXCLUDED_FROM_EVICTION = (
|
|
|
795
342
|
)
|
|
796
343
|
|
|
797
344
|
|
|
798
|
-
|
|
799
|
-
|
|
800
|
-
|
|
801
|
-
|
|
802
|
-
|
|
803
|
-
|
|
804
|
-
|
|
805
|
-
|
|
806
|
-
|
|
345
|
+
TOO_LARGE_TOOL_MSG = """Tool result too large, the result of this tool call {tool_call_id} was saved in the filesystem at this path: {file_path}
|
|
346
|
+
You can read the result from the filesystem by using the read_file tool, but make sure to only read part of the result at a time.
|
|
347
|
+
You can do this by specifying an offset and limit in the read_file tool call.
|
|
348
|
+
For example, to read the first 100 lines, you can use the read_file tool with offset=0 and limit=100.
|
|
349
|
+
|
|
350
|
+
Here is a preview showing the head and tail of the result (lines of the form
|
|
351
|
+
... [N lines truncated] ...
|
|
352
|
+
indicate omitted lines in the middle of the content):
|
|
353
|
+
|
|
354
|
+
{content_sample}
|
|
355
|
+
"""
|
|
807
356
|
|
|
808
357
|
|
|
809
|
-
def
|
|
810
|
-
|
|
811
|
-
custom_tool_descriptions: dict[str, str] | None = None,
|
|
812
|
-
) -> list[BaseTool]:
|
|
813
|
-
"""Get filesystem and execution tools.
|
|
358
|
+
def _create_content_preview(content_str: str, *, head_lines: int = 5, tail_lines: int = 5) -> str:
|
|
359
|
+
"""Create a preview of content showing head and tail with truncation marker.
|
|
814
360
|
|
|
815
361
|
Args:
|
|
816
|
-
|
|
817
|
-
|
|
362
|
+
content_str: The full content string to preview.
|
|
363
|
+
head_lines: Number of lines to show from the start.
|
|
364
|
+
tail_lines: Number of lines to show from the end.
|
|
818
365
|
|
|
819
366
|
Returns:
|
|
820
|
-
|
|
367
|
+
Formatted preview string with line numbers.
|
|
821
368
|
"""
|
|
822
|
-
|
|
823
|
-
custom_tool_descriptions = {}
|
|
824
|
-
tools = []
|
|
369
|
+
lines = content_str.splitlines()
|
|
825
370
|
|
|
826
|
-
|
|
827
|
-
|
|
828
|
-
|
|
829
|
-
|
|
371
|
+
if len(lines) <= head_lines + tail_lines:
|
|
372
|
+
# If file is small enough, show all lines
|
|
373
|
+
preview_lines = [line[:1000] for line in lines]
|
|
374
|
+
return format_content_with_line_numbers(preview_lines, start_line=1)
|
|
830
375
|
|
|
376
|
+
# Show head and tail with truncation marker
|
|
377
|
+
head = [line[:1000] for line in lines[:head_lines]]
|
|
378
|
+
tail = [line[:1000] for line in lines[-tail_lines:]]
|
|
831
379
|
|
|
832
|
-
|
|
833
|
-
|
|
834
|
-
|
|
835
|
-
For example, to read the first 100 lines, you can use the read_file tool with offset=0 and limit=100.
|
|
380
|
+
head_sample = format_content_with_line_numbers(head, start_line=1)
|
|
381
|
+
truncation_notice = f"\n... [{len(lines) - head_lines - tail_lines} lines truncated] ...\n"
|
|
382
|
+
tail_sample = format_content_with_line_numbers(tail, start_line=len(lines) - tail_lines + 1)
|
|
836
383
|
|
|
837
|
-
|
|
838
|
-
{content_sample}
|
|
839
|
-
"""
|
|
384
|
+
return head_sample + truncation_notice + tail_sample
|
|
840
385
|
|
|
841
386
|
|
|
842
387
|
class FilesystemMiddleware(AgentMiddleware):
|
|
@@ -909,15 +454,23 @@ class FilesystemMiddleware(AgentMiddleware):
|
|
|
909
454
|
custom_tool_descriptions: Optional custom tool descriptions override.
|
|
910
455
|
tool_token_limit_before_evict: Optional token limit before evicting a tool result to the filesystem.
|
|
911
456
|
"""
|
|
912
|
-
self.tool_token_limit_before_evict = tool_token_limit_before_evict
|
|
913
|
-
|
|
914
457
|
# Use provided backend or default to StateBackend factory
|
|
915
458
|
self.backend = backend if backend is not None else (lambda rt: StateBackend(rt))
|
|
916
459
|
|
|
917
|
-
#
|
|
460
|
+
# Store configuration (private - internal implementation details)
|
|
918
461
|
self._custom_system_prompt = system_prompt
|
|
919
|
-
|
|
920
|
-
self.
|
|
462
|
+
self._custom_tool_descriptions = custom_tool_descriptions or {}
|
|
463
|
+
self._tool_token_limit_before_evict = tool_token_limit_before_evict
|
|
464
|
+
|
|
465
|
+
self.tools = [
|
|
466
|
+
self._create_ls_tool(),
|
|
467
|
+
self._create_read_file_tool(),
|
|
468
|
+
self._create_write_file_tool(),
|
|
469
|
+
self._create_edit_file_tool(),
|
|
470
|
+
self._create_glob_tool(),
|
|
471
|
+
self._create_grep_tool(),
|
|
472
|
+
self._create_execute_tool(),
|
|
473
|
+
]
|
|
921
474
|
|
|
922
475
|
def _get_backend(self, runtime: ToolRuntime) -> BackendProtocol:
|
|
923
476
|
"""Get the resolved backend instance from backend or factory.
|
|
@@ -932,6 +485,418 @@ class FilesystemMiddleware(AgentMiddleware):
|
|
|
932
485
|
return self.backend(runtime)
|
|
933
486
|
return self.backend
|
|
934
487
|
|
|
488
|
+
def _create_ls_tool(self) -> BaseTool:
|
|
489
|
+
"""Create the ls (list files) tool."""
|
|
490
|
+
tool_description = self._custom_tool_descriptions.get("ls") or LIST_FILES_TOOL_DESCRIPTION
|
|
491
|
+
|
|
492
|
+
def sync_ls(
|
|
493
|
+
runtime: ToolRuntime[None, FilesystemState],
|
|
494
|
+
path: Annotated[str, "Absolute path to the directory to list. Must be absolute, not relative."],
|
|
495
|
+
) -> str:
|
|
496
|
+
"""Synchronous wrapper for ls tool."""
|
|
497
|
+
resolved_backend = self._get_backend(runtime)
|
|
498
|
+
try:
|
|
499
|
+
validated_path = _validate_path(path)
|
|
500
|
+
except ValueError as e:
|
|
501
|
+
return f"Error: {e}"
|
|
502
|
+
infos = resolved_backend.ls_info(validated_path)
|
|
503
|
+
paths = [fi.get("path", "") for fi in infos]
|
|
504
|
+
result = truncate_if_too_long(paths)
|
|
505
|
+
return str(result)
|
|
506
|
+
|
|
507
|
+
async def async_ls(
|
|
508
|
+
runtime: ToolRuntime[None, FilesystemState],
|
|
509
|
+
path: Annotated[str, "Absolute path to the directory to list. Must be absolute, not relative."],
|
|
510
|
+
) -> str:
|
|
511
|
+
"""Asynchronous wrapper for ls tool."""
|
|
512
|
+
resolved_backend = self._get_backend(runtime)
|
|
513
|
+
try:
|
|
514
|
+
validated_path = _validate_path(path)
|
|
515
|
+
except ValueError as e:
|
|
516
|
+
return f"Error: {e}"
|
|
517
|
+
infos = await resolved_backend.als_info(validated_path)
|
|
518
|
+
paths = [fi.get("path", "") for fi in infos]
|
|
519
|
+
result = truncate_if_too_long(paths)
|
|
520
|
+
return str(result)
|
|
521
|
+
|
|
522
|
+
return StructuredTool.from_function(
|
|
523
|
+
name="ls",
|
|
524
|
+
description=tool_description,
|
|
525
|
+
func=sync_ls,
|
|
526
|
+
coroutine=async_ls,
|
|
527
|
+
)
|
|
528
|
+
|
|
529
|
+
def _create_read_file_tool(self) -> BaseTool:
    """Create the read_file tool.

    The sync and async wrappers share one post-processing helper so the
    line-limit and token-truncation rules cannot drift apart between them.
    """
    tool_description = self._custom_tool_descriptions.get("read_file") or READ_FILE_TOOL_DESCRIPTION
    token_limit = self._tool_token_limit_before_evict

    def _postprocess(result: str, validated_path: str, limit: int) -> str:
        """Enforce the line limit, then truncate oversized output.

        Args:
            result: Raw content returned by the backend read.
            validated_path: The validated file path (used in the truncation hint).
            limit: Maximum number of lines the caller requested.
        """
        lines = result.splitlines(keepends=True)
        if len(lines) > limit:
            lines = lines[:limit]
            result = "".join(lines)

        # Check if result exceeds token threshold and truncate if necessary.
        if token_limit and len(result) >= NUM_CHARS_PER_TOKEN * token_limit:
            # Reserve room for the truncation message so the final result stays
            # under the threshold. Clamp at 0: with a very small token limit the
            # budget could otherwise go negative, and a negative slice index
            # would silently keep the *tail* of the content instead of the head.
            truncation_msg = READ_FILE_TRUNCATION_MSG.format(file_path=validated_path)
            max_content_length = max(0, NUM_CHARS_PER_TOKEN * token_limit - len(truncation_msg))
            result = result[:max_content_length] + truncation_msg
        return result

    def sync_read_file(
        file_path: Annotated[str, "Absolute path to the file to read. Must be absolute, not relative."],
        runtime: ToolRuntime[None, FilesystemState],
        offset: Annotated[int, "Line number to start reading from (0-indexed). Use for pagination of large files."] = DEFAULT_READ_OFFSET,
        limit: Annotated[int, "Maximum number of lines to read. Use for pagination of large files."] = DEFAULT_READ_LIMIT,
    ) -> str:
        """Synchronous wrapper for read_file tool."""
        resolved_backend = self._get_backend(runtime)
        try:
            validated_path = _validate_path(file_path)
        except ValueError as e:
            return f"Error: {e}"
        result = resolved_backend.read(validated_path, offset=offset, limit=limit)
        return _postprocess(result, validated_path, limit)

    async def async_read_file(
        file_path: Annotated[str, "Absolute path to the file to read. Must be absolute, not relative."],
        runtime: ToolRuntime[None, FilesystemState],
        offset: Annotated[int, "Line number to start reading from (0-indexed). Use for pagination of large files."] = DEFAULT_READ_OFFSET,
        limit: Annotated[int, "Maximum number of lines to read. Use for pagination of large files."] = DEFAULT_READ_LIMIT,
    ) -> str:
        """Asynchronous wrapper for read_file tool."""
        resolved_backend = self._get_backend(runtime)
        try:
            validated_path = _validate_path(file_path)
        except ValueError as e:
            return f"Error: {e}"
        result = await resolved_backend.aread(validated_path, offset=offset, limit=limit)
        return _postprocess(result, validated_path, limit)

    return StructuredTool.from_function(
        name="read_file",
        description=tool_description,
        func=sync_read_file,
        coroutine=async_read_file,
    )
|
|
598
|
+
|
|
599
|
+
def _create_write_file_tool(self) -> BaseTool:
    """Create the write_file tool.

    The sync and async wrappers share one response builder so the
    Command/ToolMessage construction cannot drift apart between them.
    """
    tool_description = self._custom_tool_descriptions.get("write_file") or WRITE_FILE_TOOL_DESCRIPTION

    def _build_response(res: WriteResult, runtime: ToolRuntime[None, FilesystemState]) -> Command | str:
        """Turn a successful WriteResult into the tool's return value.

        If the backend returns a state update, wrap it into a Command with a
        ToolMessage; otherwise return a plain confirmation string.
        """
        if res.files_update is not None:
            return Command(
                update={
                    "files": res.files_update,
                    "messages": [
                        ToolMessage(
                            content=f"Updated file {res.path}",
                            tool_call_id=runtime.tool_call_id,
                        )
                    ],
                }
            )
        return f"Updated file {res.path}"

    def sync_write_file(
        file_path: Annotated[str, "Absolute path where the file should be created. Must be absolute, not relative."],
        content: Annotated[str, "The text content to write to the file. This parameter is required."],
        runtime: ToolRuntime[None, FilesystemState],
    ) -> Command | str:
        """Synchronous wrapper for write_file tool."""
        resolved_backend = self._get_backend(runtime)
        try:
            validated_path = _validate_path(file_path)
        except ValueError as e:
            return f"Error: {e}"
        res: WriteResult = resolved_backend.write(validated_path, content)
        if res.error:
            return res.error
        return _build_response(res, runtime)

    async def async_write_file(
        file_path: Annotated[str, "Absolute path where the file should be created. Must be absolute, not relative."],
        content: Annotated[str, "The text content to write to the file. This parameter is required."],
        runtime: ToolRuntime[None, FilesystemState],
    ) -> Command | str:
        """Asynchronous wrapper for write_file tool."""
        resolved_backend = self._get_backend(runtime)
        try:
            validated_path = _validate_path(file_path)
        except ValueError as e:
            return f"Error: {e}"
        res: WriteResult = await resolved_backend.awrite(validated_path, content)
        if res.error:
            return res.error
        return _build_response(res, runtime)

    return StructuredTool.from_function(
        name="write_file",
        description=tool_description,
        func=sync_write_file,
        coroutine=async_write_file,
    )
|
|
667
|
+
|
|
668
|
+
def _create_edit_file_tool(self) -> BaseTool:
    """Create the edit_file tool.

    The sync and async wrappers share one response builder so the
    Command/ToolMessage construction cannot drift apart between them.
    """
    tool_description = self._custom_tool_descriptions.get("edit_file") or EDIT_FILE_TOOL_DESCRIPTION

    def _build_response(res: EditResult, runtime: ToolRuntime[None, FilesystemState]) -> Command | str:
        """Turn a successful EditResult into the tool's return value.

        If the backend returns a state update, wrap it into a Command with a
        ToolMessage; otherwise return a plain confirmation string.
        """
        summary = f"Successfully replaced {res.occurrences} instance(s) of the string in '{res.path}'"
        if res.files_update is not None:
            return Command(
                update={
                    "files": res.files_update,
                    "messages": [
                        ToolMessage(
                            content=summary,
                            tool_call_id=runtime.tool_call_id,
                        )
                    ],
                }
            )
        return summary

    def sync_edit_file(
        file_path: Annotated[str, "Absolute path to the file to edit. Must be absolute, not relative."],
        old_string: Annotated[str, "The exact text to find and replace. Must be unique in the file unless replace_all is True."],
        new_string: Annotated[str, "The text to replace old_string with. Must be different from old_string."],
        runtime: ToolRuntime[None, FilesystemState],
        *,
        replace_all: Annotated[bool, "If True, replace all occurrences of old_string. If False (default), old_string must be unique."] = False,
    ) -> Command | str:
        """Synchronous wrapper for edit_file tool."""
        resolved_backend = self._get_backend(runtime)
        try:
            validated_path = _validate_path(file_path)
        except ValueError as e:
            return f"Error: {e}"
        res: EditResult = resolved_backend.edit(validated_path, old_string, new_string, replace_all=replace_all)
        if res.error:
            return res.error
        return _build_response(res, runtime)

    async def async_edit_file(
        file_path: Annotated[str, "Absolute path to the file to edit. Must be absolute, not relative."],
        old_string: Annotated[str, "The exact text to find and replace. Must be unique in the file unless replace_all is True."],
        new_string: Annotated[str, "The text to replace old_string with. Must be different from old_string."],
        runtime: ToolRuntime[None, FilesystemState],
        *,
        replace_all: Annotated[bool, "If True, replace all occurrences of old_string. If False (default), old_string must be unique."] = False,
    ) -> Command | str:
        """Asynchronous wrapper for edit_file tool."""
        resolved_backend = self._get_backend(runtime)
        try:
            validated_path = _validate_path(file_path)
        except ValueError as e:
            return f"Error: {e}"
        res: EditResult = await resolved_backend.aedit(validated_path, old_string, new_string, replace_all=replace_all)
        if res.error:
            return res.error
        return _build_response(res, runtime)

    return StructuredTool.from_function(
        name="edit_file",
        description=tool_description,
        func=sync_edit_file,
        coroutine=async_edit_file,
    )
|
|
740
|
+
|
|
741
|
+
def _create_glob_tool(self) -> BaseTool:
    """Build the ``glob`` tool that matches files against a glob pattern."""
    tool_description = self._custom_tool_descriptions.get("glob") or GLOB_TOOL_DESCRIPTION

    def sync_glob(
        pattern: Annotated[str, "Glob pattern to match files (e.g., '**/*.py', '*.txt', '/subdir/**/*.md')."],
        runtime: ToolRuntime[None, FilesystemState],
        path: Annotated[str, "Base directory to search from. Defaults to root '/'."] = "/",
    ) -> str:
        """Sync implementation: match the pattern and return the file paths."""
        backend = self._get_backend(runtime)
        entries = backend.glob_info(pattern, path=path)
        # Surface only the path of each matched entry.
        matched = [entry.get("path", "") for entry in entries]
        return str(truncate_if_too_long(matched))

    async def async_glob(
        pattern: Annotated[str, "Glob pattern to match files (e.g., '**/*.py', '*.txt', '/subdir/**/*.md')."],
        runtime: ToolRuntime[None, FilesystemState],
        path: Annotated[str, "Base directory to search from. Defaults to root '/'."] = "/",
    ) -> str:
        """Async implementation: match the pattern and return the file paths."""
        backend = self._get_backend(runtime)
        entries = await backend.aglob_info(pattern, path=path)
        # Surface only the path of each matched entry.
        matched = [entry.get("path", "") for entry in entries]
        return str(truncate_if_too_long(matched))

    return StructuredTool.from_function(
        name="glob",
        description=tool_description,
        func=sync_glob,
        coroutine=async_glob,
    )
|
|
775
|
+
|
|
776
|
+
def _create_grep_tool(self) -> BaseTool:
    """Build the ``grep`` tool that searches file contents for a literal pattern."""
    tool_description = self._custom_tool_descriptions.get("grep") or GREP_TOOL_DESCRIPTION

    def sync_grep(
        pattern: Annotated[str, "Text pattern to search for (literal string, not regex)."],
        runtime: ToolRuntime[None, FilesystemState],
        path: Annotated[str | None, "Directory to search in. Defaults to current working directory."] = None,
        glob: Annotated[str | None, "Glob pattern to filter which files to search (e.g., '*.py')."] = None,
        output_mode: Annotated[
            Literal["files_with_matches", "content", "count"],
            "Output format: 'files_with_matches' (file paths only, default), 'content' (matching lines with context), 'count' (match counts per file).",
        ] = "files_with_matches",
    ) -> str:
        """Sync implementation: run the search, then format the matches."""
        backend = self._get_backend(runtime)
        matches = backend.grep_raw(pattern, path=path, glob=glob)
        # A plain string from the backend is passed through verbatim.
        if isinstance(matches, str):
            return matches
        rendered = format_grep_matches(matches, output_mode)
        return truncate_if_too_long(rendered)  # type: ignore[arg-type]

    async def async_grep(
        pattern: Annotated[str, "Text pattern to search for (literal string, not regex)."],
        runtime: ToolRuntime[None, FilesystemState],
        path: Annotated[str | None, "Directory to search in. Defaults to current working directory."] = None,
        glob: Annotated[str | None, "Glob pattern to filter which files to search (e.g., '*.py')."] = None,
        output_mode: Annotated[
            Literal["files_with_matches", "content", "count"],
            "Output format: 'files_with_matches' (file paths only, default), 'content' (matching lines with context), 'count' (match counts per file).",
        ] = "files_with_matches",
    ) -> str:
        """Async implementation: run the search, then format the matches."""
        backend = self._get_backend(runtime)
        matches = await backend.agrep_raw(pattern, path=path, glob=glob)
        # A plain string from the backend is passed through verbatim.
        if isinstance(matches, str):
            return matches
        rendered = format_grep_matches(matches, output_mode)
        return truncate_if_too_long(rendered)  # type: ignore[arg-type]

    return StructuredTool.from_function(
        name="grep",
        description=tool_description,
        func=sync_grep,
        coroutine=async_grep,
    )
|
|
822
|
+
|
|
823
|
+
def _create_execute_tool(self) -> BaseTool:
    """Create the execute tool for sandbox command execution.

    The "not available" message and the result formatting are shared between
    the sync and async wrappers so the two code paths cannot drift apart.
    """
    tool_description = self._custom_tool_descriptions.get("execute") or EXECUTE_TOOL_DESCRIPTION

    # Single source of truth for the unsupported-backend error text.
    execution_unavailable_msg = (
        "Error: Execution not available. This agent's backend "
        "does not support command execution (SandboxBackendProtocol). "
        "To use the execute tool, provide a backend that implements SandboxBackendProtocol."
    )

    def _format_result(result) -> str:
        """Format an execution result for LLM consumption.

        Appends the exit-code status (when known) and a truncation notice
        (when the backend reports truncated output).
        """
        parts = [result.output]
        if result.exit_code is not None:
            status = "succeeded" if result.exit_code == 0 else "failed"
            parts.append(f"\n[Command {status} with exit code {result.exit_code}]")
        if result.truncated:
            parts.append("\n[Output was truncated due to size limits]")
        return "".join(parts)

    def sync_execute(
        command: Annotated[str, "Shell command to execute in the sandbox environment."],
        runtime: ToolRuntime[None, FilesystemState],
    ) -> str:
        """Synchronous wrapper for execute tool."""
        resolved_backend = self._get_backend(runtime)

        # Runtime check - fail gracefully if not supported
        if not _supports_execution(resolved_backend):
            return execution_unavailable_msg

        try:
            result = resolved_backend.execute(command)
        except NotImplementedError as e:
            # Handle case where execute() exists but raises NotImplementedError
            return f"Error: Execution not available. {e}"

        return _format_result(result)

    async def async_execute(
        command: Annotated[str, "Shell command to execute in the sandbox environment."],
        runtime: ToolRuntime[None, FilesystemState],
    ) -> str:
        """Asynchronous wrapper for execute tool."""
        resolved_backend = self._get_backend(runtime)

        # Runtime check - fail gracefully if not supported
        if not _supports_execution(resolved_backend):
            return execution_unavailable_msg

        try:
            result = await resolved_backend.aexecute(command)
        except NotImplementedError as e:
            # Handle case where execute() exists but raises NotImplementedError
            return f"Error: Execution not available. {e}"

        return _format_result(result)

    return StructuredTool.from_function(
        name="execute",
        description=tool_description,
        func=sync_execute,
        coroutine=async_execute,
    )
|
|
899
|
+
|
|
935
900
|
def wrap_model_call(
|
|
936
901
|
self,
|
|
937
902
|
request: ModelRequest,
|
|
@@ -1054,7 +1019,7 @@ class FilesystemMiddleware(AgentMiddleware):
|
|
|
1054
1019
|
The model can recover by reading the offloaded file from the backend.
|
|
1055
1020
|
"""
|
|
1056
1021
|
# Early exit if eviction not configured
|
|
1057
|
-
if not self.
|
|
1022
|
+
if not self._tool_token_limit_before_evict:
|
|
1058
1023
|
return message, None
|
|
1059
1024
|
|
|
1060
1025
|
# Convert content to string once for both size check and eviction
|
|
@@ -1074,9 +1039,7 @@ class FilesystemMiddleware(AgentMiddleware):
|
|
|
1074
1039
|
content_str = str(message.content)
|
|
1075
1040
|
|
|
1076
1041
|
# Check if content exceeds eviction threshold
|
|
1077
|
-
|
|
1078
|
-
# This errs on the high side to avoid premature eviction of content that might fit
|
|
1079
|
-
if len(content_str) <= 4 * self.tool_token_limit_before_evict:
|
|
1042
|
+
if len(content_str) <= NUM_CHARS_PER_TOKEN * self._tool_token_limit_before_evict:
|
|
1080
1043
|
return message, None
|
|
1081
1044
|
|
|
1082
1045
|
# Write content to filesystem
|
|
@@ -1086,8 +1049,8 @@ class FilesystemMiddleware(AgentMiddleware):
|
|
|
1086
1049
|
if result.error:
|
|
1087
1050
|
return message, None
|
|
1088
1051
|
|
|
1089
|
-
# Create
|
|
1090
|
-
content_sample =
|
|
1052
|
+
# Create preview showing head and tail of the result
|
|
1053
|
+
content_sample = _create_content_preview(content_str)
|
|
1091
1054
|
replacement_text = TOO_LARGE_TOOL_MSG.format(
|
|
1092
1055
|
tool_call_id=message.tool_call_id,
|
|
1093
1056
|
file_path=file_path,
|
|
@@ -1113,7 +1076,7 @@ class FilesystemMiddleware(AgentMiddleware):
|
|
|
1113
1076
|
See _process_large_message for full documentation.
|
|
1114
1077
|
"""
|
|
1115
1078
|
# Early exit if eviction not configured
|
|
1116
|
-
if not self.
|
|
1079
|
+
if not self._tool_token_limit_before_evict:
|
|
1117
1080
|
return message, None
|
|
1118
1081
|
|
|
1119
1082
|
# Convert content to string once for both size check and eviction
|
|
@@ -1132,10 +1095,7 @@ class FilesystemMiddleware(AgentMiddleware):
|
|
|
1132
1095
|
# Multiple blocks or non-text content - stringify entire structure
|
|
1133
1096
|
content_str = str(message.content)
|
|
1134
1097
|
|
|
1135
|
-
|
|
1136
|
-
# Using 4 chars per token as a conservative approximation (actual ratio varies by content)
|
|
1137
|
-
# This errs on the high side to avoid premature eviction of content that might fit
|
|
1138
|
-
if len(content_str) <= 4 * self.tool_token_limit_before_evict:
|
|
1098
|
+
if len(content_str) <= NUM_CHARS_PER_TOKEN * self._tool_token_limit_before_evict:
|
|
1139
1099
|
return message, None
|
|
1140
1100
|
|
|
1141
1101
|
# Write content to filesystem using async method
|
|
@@ -1145,8 +1105,8 @@ class FilesystemMiddleware(AgentMiddleware):
|
|
|
1145
1105
|
if result.error:
|
|
1146
1106
|
return message, None
|
|
1147
1107
|
|
|
1148
|
-
# Create
|
|
1149
|
-
content_sample =
|
|
1108
|
+
# Create preview showing head and tail of the result
|
|
1109
|
+
content_sample = _create_content_preview(content_str)
|
|
1150
1110
|
replacement_text = TOO_LARGE_TOOL_MSG.format(
|
|
1151
1111
|
tool_call_id=message.tool_call_id,
|
|
1152
1112
|
file_path=file_path,
|
|
@@ -1277,7 +1237,7 @@ class FilesystemMiddleware(AgentMiddleware):
|
|
|
1277
1237
|
Returns:
|
|
1278
1238
|
The raw ToolMessage, or a pseudo tool message with the ToolResult in state.
|
|
1279
1239
|
"""
|
|
1280
|
-
if self.
|
|
1240
|
+
if self._tool_token_limit_before_evict is None or request.tool_call["name"] in TOOLS_EXCLUDED_FROM_EVICTION:
|
|
1281
1241
|
return handler(request)
|
|
1282
1242
|
|
|
1283
1243
|
tool_result = handler(request)
|
|
@@ -1297,7 +1257,7 @@ class FilesystemMiddleware(AgentMiddleware):
|
|
|
1297
1257
|
Returns:
|
|
1298
1258
|
The raw ToolMessage, or a pseudo tool message with the ToolResult in state.
|
|
1299
1259
|
"""
|
|
1300
|
-
if self.
|
|
1260
|
+
if self._tool_token_limit_before_evict is None or request.tool_call["name"] in TOOLS_EXCLUDED_FROM_EVICTION:
|
|
1301
1261
|
return await handler(request)
|
|
1302
1262
|
|
|
1303
1263
|
tool_result = await handler(request)
|