skydeckai-code 0.1.23__py3-none-any.whl → 0.1.25__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
aidd/tools/file_tools.py CHANGED
@@ -4,10 +4,12 @@ import os
4
4
  import re
5
5
  import stat
6
6
  import subprocess
7
+ import shutil
7
8
  from datetime import datetime
8
9
  from typing import List
9
10
 
10
11
  import mcp.types as types
12
+ from mcp.types import TextContent
11
13
 
12
14
  from .state import state
13
15
 
@@ -15,20 +17,28 @@ from .state import state
15
17
  def read_file_tool():
16
18
  return {
17
19
  "name": "read_file",
18
- "description": "Read the complete contents of a file from the file system. "
20
+ "description": "Read the contents of a file from the file system. "
19
21
  "WHEN TO USE: When you need to examine the actual content of a single file, view source code, check configuration files, or analyze text data. "
20
22
  "This is the primary tool for accessing file contents directly. "
21
23
  "WHEN NOT TO USE: When you only need file metadata like size or modification date (use get_file_info instead), when you need to list directory contents "
22
24
  "(use directory_listing instead), or when you need to read multiple files at once (use read_multiple_files instead). "
23
- "RETURNS: The complete text content of the specified file. Binary files or files with unknown encodings will return an error message. "
25
+ "RETURNS: The complete text content of the specified file or the requested portion if offset/limit are specified. Binary files or files with unknown encodings will return an error message. "
24
26
  "Handles various text encodings and provides detailed error messages if the file cannot be read. Only works within the allowed directory. "
25
- "Example: Enter 'src/main.py' to read a Python file.",
27
+ "Example: Enter 'src/main.py' to read a Python file, or add offset/limit to read specific line ranges.",
26
28
  "inputSchema": {
27
29
  "type": "object",
28
30
  "properties": {
29
31
  "path": {
30
32
  "type": "string",
31
- "description": "Path to the file to read. This must be a path to a file, not a directory. Examples: 'README.md', 'src/main.py', 'config.json'. Both absolute and relative paths are supported, but must be within the allowed workspace.",
33
+ "description": "Path to the file to read. This must be a path to a file, not a directory. Examples: 'README.md', 'src/main.py', 'config.json'. Both absolute and relative paths are supported, but must be within the allowed workspace."
34
+ },
35
+ "offset": {
36
+ "type": "integer",
37
+ "description": "Line number to start reading from (1-indexed). If specified, the file will be read starting from this line. Default is to start from the beginning of the file.",
38
+ },
39
+ "limit": {
40
+ "type": "integer",
41
+ "description": "Maximum number of lines to read after the offset. If specified along with offset, only this many lines will be read. Default is to read to the end of the file.",
32
42
  }
33
43
  },
34
44
  "required": ["path"]
@@ -91,6 +101,40 @@ def move_file_tool():
91
101
  },
92
102
  }
93
103
 
104
def copy_file_tool():
    """Return the MCP tool definition for the copy_file operation."""
    # Per-property schemas are named locally so the composed definition below
    # stays readable.
    source_schema = {
        "type": "string",
        "description": "Source path of the file or directory to copy. This file or directory must exist. Both absolute and relative paths are supported, but must be within the allowed workspace. Examples: 'document.txt', 'src/utils.js', 'config/settings/'"
    }
    destination_schema = {
        "type": "string",
        "description": "Destination path where to copy the file or directory. If this path already exists, the operation will fail. Parent directories will be created automatically if they don't exist. Both absolute and relative paths are supported, but must be within the allowed workspace. Examples: 'document.backup.txt', 'backup/document.txt', 'src/new-project/'"
    }
    recursive_schema = {
        "type": "boolean",
        "description": "Whether to copy directories recursively. If set to true and the source is a directory, all subdirectories and files will be copied. If set to false and the source is a directory, the operation will fail. Defaults to true.",
        "default": True
    }
    description = (
        "Copy a file or directory to a new location. "
        "WHEN TO USE: When you need to duplicate files or directories while keeping the original intact, create backups, "
        "or replicate configuration files for different environments. Useful for testing changes without risking original files, "
        "creating template files, or duplicating project structures. "
        "WHEN NOT TO USE: When you want to move a file without keeping the original (use move_file instead), when the destination "
        "already exists (the operation will fail), or when either source or destination is outside the allowed workspace. "
        "RETURNS: A confirmation message indicating that the file or directory was successfully copied. "
        "For directories, the entire directory structure is copied recursively. Parent directories of the destination "
        "will be created automatically if they don't exist. Both source and destination must be within the allowed directory. "
        "Example: source='config.json', destination='config.backup.json'"
    )
    return {
        "name": "copy_file",
        "description": description,
        "inputSchema": {
            "type": "object",
            "properties": {
                "source": source_schema,
                "destination": destination_schema,
                "recursive": recursive_schema,
            },
            "required": ["source", "destination"]
        },
    }
137
+
94
138
  def search_files_tool():
95
139
  return {
96
140
  "name": "search_files",
@@ -98,8 +142,8 @@ def search_files_tool():
98
142
  "WHEN TO USE: When you need to find files or directories by name pattern across a directory tree, locate files with specific extensions, "
99
143
  "or find items containing certain text in their names. Useful for locating configuration files, finding all files of a certain type, "
100
144
  "or gathering files related to a specific feature. "
101
- "WHEN NOT TO USE: When searching for content within files (there is no grep tool in this application), when you need a flat listing of a single directory "
102
- "(use directory_listing instead), or when you need to analyze code structure (use codebase_mapper instead). "
145
+ "WHEN NOT TO USE: When searching for content within files (use search_code tool for that), when you need a flat listing of a single directory "
146
+ "(use list_directory instead), or when you need to analyze code structure (use codebase_mapper instead). "
103
147
  "RETURNS: A list of matching files and directories with their types ([FILE] or [DIR]) and relative paths. "
104
148
  "For Git repositories, only shows tracked files and directories by default. "
105
149
  "The search is recursive and case-insensitive. Only searches within the allowed directory. "
@@ -261,10 +305,8 @@ def edit_file_tool():
261
305
  }
262
306
  }
263
307
 
264
- async def _read_single_file(path: str) -> List[types.TextContent]:
308
+ async def _read_single_file(path: str, offset: int = None, limit: int = None) -> List[TextContent]:
265
309
  """Helper function to read a single file with proper validation."""
266
- from mcp.types import TextContent
267
-
268
310
  # Determine full path based on whether input is absolute or relative
269
311
  if os.path.isabs(path):
270
312
  full_path = os.path.abspath(path) # Just normalize the absolute path
@@ -282,7 +324,40 @@ async def _read_single_file(path: str) -> List[types.TextContent]:
282
324
 
283
325
  try:
284
326
  with open(full_path, 'r', encoding='utf-8') as f:
285
- content = f.read()
327
+ # If we have offset/limit parameters, read only the specified lines
328
+ if offset is not None or limit is not None:
329
+ lines = f.readlines()
330
+
331
+ # Determine start line - convert from 1-indexed to 0-indexed
332
+ start_idx = 0
333
+ if offset is not None:
334
+ start_idx = max(0, offset - 1) # Convert 1-indexed to 0-indexed
335
+
336
+ # Determine end line
337
+ end_idx = len(lines)
338
+ if limit is not None:
339
+ end_idx = min(len(lines), start_idx + limit)
340
+
341
+ # Read only the specified range
342
+ content = ''.join(lines[start_idx:end_idx])
343
+
344
+ # Add summary information about the file before and after the selected range
345
+ total_lines = len(lines)
346
+ summary = []
347
+
348
+ if start_idx > 0:
349
+ summary.append(f"[...{start_idx} lines before...]")
350
+
351
+ if end_idx < total_lines:
352
+ summary.append(f"[...{total_lines - end_idx} lines after...]")
353
+
354
+ if summary:
355
+ content = '\n'.join(summary) + '\n' + content
356
+
357
+ else:
358
+ # Read the entire file
359
+ content = f.read()
360
+
286
361
  return [TextContent(
287
362
  type="text",
288
363
  text=content
@@ -296,8 +371,6 @@ async def _read_single_file(path: str) -> List[types.TextContent]:
296
371
 
297
372
  async def handle_write_file(arguments: dict):
298
373
  """Handle writing content to a file."""
299
- from mcp.types import TextContent
300
-
301
374
  path = arguments.get("path")
302
375
  content = arguments.get("content")
303
376
 
@@ -331,13 +404,16 @@ async def handle_write_file(arguments: dict):
331
404
  except Exception as e:
332
405
  raise ValueError(f"Error writing file: {str(e)}")
333
406
 
334
-
335
407
async def handle_read_file(arguments: dict):
    """Read one file, optionally restricted to a line range.

    Requires 'path' in arguments; 'offset' (1-indexed start line) and
    'limit' (max lines) are optional and default to reading the whole file.
    """
    file_path = arguments.get("path")
    if not file_path:
        raise ValueError("path must be provided")

    # offset/limit may be absent; None means "from the start" / "to the end".
    return await _read_single_file(
        file_path,
        arguments.get("offset"),
        arguments.get("limit"),
    )
341
417
 
342
418
  async def handle_read_multiple_files(arguments: dict):
343
419
  paths = arguments.get("paths", [])
@@ -348,7 +424,6 @@ async def handle_read_multiple_files(arguments: dict):
348
424
  if not paths:
349
425
  raise ValueError("paths list cannot be empty")
350
426
 
351
- from mcp.types import TextContent
352
427
  results = []
353
428
  for path in paths:
354
429
  try:
@@ -369,8 +444,6 @@ async def handle_read_multiple_files(arguments: dict):
369
444
 
370
445
  async def handle_move_file(arguments: dict):
371
446
  """Handle moving a file or directory to a new location."""
372
- from mcp.types import TextContent
373
-
374
447
  source = arguments.get("source")
375
448
  destination = arguments.get("destination")
376
449
 
@@ -415,10 +488,67 @@ async def handle_move_file(arguments: dict):
415
488
  except Exception as e:
416
489
  raise ValueError(f"Unexpected error: {str(e)}")
417
490
 
491
async def handle_copy_file(arguments: dict):
    """Handle copying a file or directory to a new location.

    Resolves both paths against the allowed workspace, verifies the source
    exists and the destination does not, then delegates to shutil.

    Args:
        arguments: dict with 'source', 'destination', and optional
            'recursive' (default True) keys.

    Returns:
        A single-element list with a TextContent confirmation message.

    Raises:
        ValueError: on missing arguments, paths outside the workspace,
            a missing source, an existing destination, a directory source
            with recursive=False, or any underlying copy failure.
    """
    source = arguments.get("source")
    destination = arguments.get("destination")
    recursive = arguments.get("recursive", True)

    if not source:
        raise ValueError("source must be provided")
    if not destination:
        raise ValueError("destination must be provided")

    # Determine full paths based on whether inputs are absolute or relative.
    if os.path.isabs(source):
        full_source = os.path.abspath(source)
    else:
        full_source = os.path.abspath(os.path.join(state.allowed_directory, source))

    if os.path.isabs(destination):
        full_destination = os.path.abspath(destination)
    else:
        full_destination = os.path.abspath(os.path.join(state.allowed_directory, destination))

    # Security checks. A bare prefix test would accept sibling directories
    # such as '/workspace-evil' when '/workspace' is allowed, so require an
    # exact match or a path-separator boundary after the allowed root.
    allowed = state.allowed_directory
    allowed_prefix = allowed.rstrip(os.sep) + os.sep
    if full_source != allowed and not full_source.startswith(allowed_prefix):
        raise ValueError(f"Access denied: Source path ({full_source}) must be within allowed directory")
    if full_destination != allowed and not full_destination.startswith(allowed_prefix):
        raise ValueError(f"Access denied: Destination path ({full_destination}) must be within allowed directory")

    # Validate source exists.
    if not os.path.exists(full_source):
        raise ValueError(f"Source path does not exist: {source}")

    # Refuse an existing destination rather than overwrite.
    if os.path.exists(full_destination):
        raise ValueError(f"Destination already exists: {destination}")

    # Reject a directory copy without the recursive flag BEFORE touching the
    # filesystem: doing this inside the try block below both re-wrapped the
    # ValueError (garbling its message) and left freshly created, now-empty
    # parent directories behind.
    source_is_dir = os.path.isdir(full_source)
    if source_is_dir and not recursive:
        raise ValueError(f"Cannot copy directory without recursive flag: {source}")

    # Create parent directories of the destination if they don't exist.
    os.makedirs(os.path.dirname(full_destination), exist_ok=True)

    try:
        if source_is_dir:
            # Copy the whole directory tree.
            shutil.copytree(full_source, full_destination)
            return [TextContent(
                type="text",
                text=f"Successfully copied directory {source} to {destination}"
            )]
        # Copy a single file, preserving metadata.
        shutil.copy2(full_source, full_destination)
        return [TextContent(
            type="text",
            text=f"Successfully copied file {source} to {destination}"
        )]
    except Exception as e:
        raise ValueError(f"Error copying {source} to {destination}: {str(e)}")
549
+
418
550
  async def handle_search_files(arguments: dict):
419
551
  """Handle searching for files matching a pattern."""
420
- from mcp.types import TextContent
421
-
422
552
  pattern = arguments.get("pattern")
423
553
  start_path = arguments.get("path", ".")
424
554
  include_hidden = arguments.get("include_hidden", False)
@@ -524,8 +654,6 @@ async def handle_search_files(arguments: dict):
524
654
 
525
655
  async def handle_get_file_info(arguments: dict):
526
656
  """Handle getting detailed information about a file or directory."""
527
- from mcp.types import TextContent
528
-
529
657
  path = arguments.get("path")
530
658
  if not path:
531
659
  raise ValueError("path must be provided")
@@ -566,8 +694,6 @@ Permissions: {perms}"""
566
694
 
567
695
  async def handle_delete_file(arguments: dict):
568
696
  """Handle deleting a file or empty directory."""
569
- from mcp.types import TextContent
570
-
571
697
  path = arguments.get("path")
572
698
  if not path:
573
699
  raise ValueError("path must be provided")
@@ -748,8 +874,6 @@ async def apply_file_edits(file_path: str, edits: List[dict], dry_run: bool = Fa
748
874
 
749
875
  async def handle_edit_file(arguments: dict):
750
876
  """Handle editing a file with pattern matching and formatting."""
751
- from mcp.types import TextContent
752
-
753
877
  path = arguments.get("path")
754
878
  edits = arguments.get("edits")
755
879
  dry_run = arguments.get("dryRun", False)
aidd/tools/git_tools.py CHANGED
@@ -424,6 +424,46 @@ def git_show_tool():
424
424
  }
425
425
  }
426
426
 
427
def git_clone_tool():
    """Return the MCP tool definition for the git_clone operation."""
    description = (
        "Clones a remote Git repository into a new directory. "
        "WHEN TO USE: When you need to download a copy of an existing Git repository, start working with a "
        "remote codebase, or initialize a new local copy of a project. Useful for contributing to open-source "
        "projects, setting up new development environments, or accessing shared code repositories. "
        "WHEN NOT TO USE: When the target directory already contains a Git repository, when you only need to "
        "update an existing repository (use git_pull instead), or when you want to create a new empty repository "
        "(use git_init instead). "
        "RETURNS: A confirmation message indicating that the repository was successfully cloned, including "
        "the source URL and destination directory. Repository must be cloned to a location within the allowed directory."
    )
    # Property schemas assembled separately to keep the return literal short.
    properties = {
        "url": {
            "type": "string",
            "description": "URL of the remote repository to clone. This can be an HTTPS URL, SSH URL, or local path. "
                           "Examples: 'https://github.com/username/repo.git', 'git@github.com:username/repo.git', "
                           "'/path/to/local/repo'. Security restrictions may apply to certain URLs."
        },
        "target_path": {
            "type": "string",
            "description": "Directory where the repository will be cloned. If the directory doesn't exist, it will "
                           "be created. If it exists, it must be empty. Examples: 'my-project', 'src/external', "
                           "'path/to/clone'. Both absolute and relative paths are supported, but must be within "
                           "the allowed workspace."
        },
        "branch": {
            "type": "string",
            "description": "Branch to check out after cloning. If not specified, the repository's default branch "
                           "is used. Examples: 'main', 'develop', 'feature/new-feature'. Specifying a branch can "
                           "save time when working with large repositories.",
            "default": None
        },
    }
    return {
        "name": "git_clone",
        "description": description,
        "inputSchema": {
            "type": "object",
            "properties": properties,
            "required": ["url", "target_path"]
        }
    }
466
+
427
467
  async def handle_git_status(arguments: dict) -> List[TextContent]:
428
468
  """Handle getting git repository status."""
429
469
  repo = _get_repo(arguments["repo_path"])
@@ -685,3 +725,50 @@ async def handle_git_show(arguments: dict) -> List[TextContent]:
685
725
  )]
686
726
  except Exception as e:
687
727
  raise ValueError(f"Error showing commit at '{repo.working_dir}': {str(e)}")
728
+
729
async def handle_git_clone(arguments: dict) -> List[TextContent]:
    """Handle cloning a remote git repository.

    Args:
        arguments: dict with 'url', 'target_path', and optional 'branch'.

    Returns:
        A single-element list with a TextContent confirmation message.

    Raises:
        ValueError: on missing arguments, a target outside the allowed
            workspace, a non-empty or non-directory target, or clone failure.
    """
    url = arguments.get("url")
    if not url:
        raise ValueError("url is required")
    target_path = arguments.get("target_path")
    if not target_path:
        raise ValueError("target_path is required")
    branch = arguments.get("branch")

    # Determine full path based on whether input is absolute or relative.
    if os.path.isabs(target_path):
        full_path = os.path.abspath(target_path)
    else:
        full_path = os.path.abspath(os.path.join(state.allowed_directory, target_path))

    # Security check. A bare startswith() would accept sibling directories
    # sharing the allowed prefix (e.g. '/workspace-evil' vs '/workspace'),
    # so require an exact match or a path-separator boundary.
    allowed = state.allowed_directory
    if full_path != allowed and not full_path.startswith(allowed.rstrip(os.sep) + os.sep):
        raise ValueError(f"Access denied: Path ({full_path}) must be within allowed directory")

    # Validate the target BEFORE the clone attempt so these precondition
    # ValueErrors are not re-wrapped as "Error cloning repository" below.
    if os.path.exists(full_path):
        if os.path.isdir(full_path) and os.listdir(full_path):
            raise ValueError(f"Target directory '{full_path}' is not empty")
        if not os.path.isdir(full_path):
            raise ValueError(f"Target path '{full_path}' exists but is not a directory")
    else:
        # Create the target directory if it doesn't exist.
        os.makedirs(full_path, exist_ok=True)

    # Clone options: only pass 'branch' when one was requested.
    clone_args = {}
    if branch:
        clone_args["branch"] = branch

    try:
        # Perform the clone; the returned Repo handle is not needed here.
        git.Repo.clone_from(url, full_path, **clone_args)
    except Exception as e:
        raise ValueError(f"Error cloning repository: {str(e)}")

    branch_info = f" (branch: {branch})" if branch else ""
    return [TextContent(
        type="text",
        text=f"Repository successfully cloned from {url} to {target_path}{branch_info}"
    )]
@@ -0,0 +1,238 @@
1
+ import asyncio
2
+ from typing import List, Dict, Any, Callable
3
+
4
+ from mcp.types import TextContent
5
+
6
+ from .state import state
7
+
8
+
9
def batch_tools_tool():
    """Return the MCP tool definition for the batch_tools operation."""
    description = (
        "Execute multiple tool invocations in parallel or serially. "
        "WHEN TO USE: When you need to run multiple operations efficiently in a single request, "
        "combine related operations, or gather results from different tools. Useful for bulk operations, "
        "coordinated tasks, or performing multiple queries simultaneously. "
        "WHEN NOT TO USE: When operations need to be performed strictly in sequence where each step depends "
        "on the previous step's result, when performing simple operations that don't benefit from batching, "
        "or when you need fine-grained error handling. "
        "RETURNS: Results from all tool invocations grouped together. Each result includes the tool name "
        "and its output. If any individual tool fails, its error is included but other tools continue execution. "
        "Parallelizable tools are executed concurrently for performance. Each tool's output is presented in "
        "a structured format along with the description you provided. "
        "IMPORTANT NOTE: All tools in the batch execute in the same working directory context. If a tool creates a directory "
        "and a subsequent tool needs to work inside that directory, you must either use paths relative to the current working directory "
        "or include an explicit tool invocation to change directories (e.g., update_allowed_directory)."
    )
    # Schema for one entry of the 'invocations' array.
    invocation_item = {
        "type": "object",
        "properties": {
            "tool": {
                "type": "string",
                "description": "Name of the tool to invoke. Must be a valid tool name registered in the system."
            },
            "arguments": {
                "type": "object",
                "description": "Arguments to pass to the tool. These should match the required arguments "
                               "for the specified tool."
            }
        },
        "required": ["tool", "arguments"]
    }
    return {
        "name": "batch_tools",
        "description": description,
        "inputSchema": {
            "type": "object",
            "properties": {
                "description": {
                    "type": "string",
                    "description": "A short (3-5 word) description of the batch operation. This helps identify the purpose "
                                   "of the batch and provides context for the results. Examples: 'Setup new project', "
                                   "'Analyze codebase', 'Gather system info'."
                },
                "sequential": {
                    "type": "boolean",
                    "description": "Whether to run tools in sequential order (true) or parallel when possible (false). "
                                   "Use sequential mode when tools need to build on the results of previous tools. "
                                   "Default is false (parallel execution).",
                    "default": False
                },
                "invocations": {
                    "type": "array",
                    "items": invocation_item,
                    "description": "List of tool invocations to execute. Each invocation specifies a tool name and its arguments. "
                                   "These will be executed in parallel when possible, or serially when necessary."
                }
            },
            "required": ["description", "invocations"]
        }
    }
66
+
67
+
68
async def handle_batch_tools(arguments: dict) -> List[TextContent]:
    """Handle executing multiple tools in batch."""
    # Imported lazily to avoid a circular import with the package registry.
    from . import TOOL_HANDLERS

    description = arguments.get("description")
    invocations = arguments.get("invocations", [])
    sequential = arguments.get("sequential", False)

    if not description:
        raise ValueError("Description must be provided")
    if not invocations:
        raise ValueError("Invocations list must not be empty")

    # Fail fast: validate every tool name before running anything.
    for idx, invocation in enumerate(invocations):
        name = invocation.get("tool")
        if not name:
            raise ValueError(f"Tool name missing in invocation #{idx+1}")
        if name not in TOOL_HANDLERS:
            raise ValueError(f"Unknown tool '{name}' in invocation #{idx+1}")

    # Header describing the batch and its execution mode.
    header = f"Batch Operation: {description}\n"
    header += f"Execution Mode: {'Sequential' if sequential else 'Parallel'}\n"
    all_contents = [TextContent(type="text", text=header)]

    def _append_outcome(idx, name, outcome):
        # Shared formatting for one tool's result: separator header, then
        # either the tool's own content or its error text.
        status = "SUCCESS" if outcome["success"] else "ERROR"
        section_header = f"[{idx+1}] {name} - {status}\n"
        all_contents.append(TextContent(type="text", text=f"\n{section_header}{'=' * len(section_header)}\n"))
        if outcome["success"]:
            all_contents.extend(outcome["content"])
        else:
            all_contents.append(TextContent(
                type="text",
                text=f"Error: {outcome['error']}"
            ))

    if sequential:
        # Sequential execution: run one at a time, stop on the first failure.
        for idx, invocation in enumerate(invocations):
            name = invocation.get("tool")
            outcome = await _execute_tool_with_error_handling(
                TOOL_HANDLERS[name], invocation.get("arguments", {}), name, idx
            )
            _append_outcome(idx, name, outcome)
            if not outcome["success"]:
                if idx < len(invocations) - 1:
                    all_contents.append(TextContent(
                        type="text",
                        text=f"\nExecution stopped after failure. Remaining {len(invocations) - idx - 1} tools were not executed."
                    ))
                break
    else:
        # Parallel execution: schedule every invocation, then gather.
        tasks = [
            asyncio.create_task(_execute_tool_with_error_handling(
                TOOL_HANDLERS[invocation.get("tool")],
                invocation.get("arguments", {}),
                invocation.get("tool"),
                idx,
            ))
            for idx, invocation in enumerate(invocations)
        ]
        for outcome in await asyncio.gather(*tasks):
            _append_outcome(outcome["index"], outcome["tool_name"], outcome)

    return all_contents
169
+
170
+
171
async def _execute_tool_with_error_handling(handler, arguments, tool_name, index):
    """Run one tool handler, capturing any exception as an error record.

    Returns a dict with 'tool_name', 'index', 'success', and either
    'content' (on success) or 'error' (the stringified exception).
    """
    outcome = {"tool_name": tool_name, "index": index}
    try:
        outcome["content"] = await handler(arguments)
        outcome["success"] = True
    except Exception as exc:
        # Swallow the exception so one failing tool cannot abort the batch;
        # the caller decides what to do with the error record.
        outcome["error"] = str(exc)
        outcome["success"] = False
    return outcome
188
+
189
+
190
def think_tool():
    """Return the MCP tool definition for the think operation."""
    description = (
        "Use the tool to think about something. "
        "WHEN TO USE: When complex reasoning or brainstorming is needed without making any changes to files "
        "or retrieving additional information. Useful for analyzing problems, planning approaches, evaluating "
        "options, or organizing thoughts before taking action. "
        "WHEN NOT TO USE: When immediate action is needed, when you need to query for new information, "
        "or when a simple explanation would suffice. "
        "RETURNS: The thoughts you provided, formatted as markdown. This tool does not retrieve new information "
        "or make any changes to the repository - it simply records your reasoning process for reference. "
        "This is particularly valuable when exploring complex bugs, designing architecture, or evaluating "
        "multiple approaches to a problem."
    )
    thought_schema = {
        "type": "string",
        "description": "Your detailed thoughts, analysis, reasoning, or brainstorming. Can include markdown "
                       "formatting for better readability, like bullet points, headings, or code blocks. "
                       "Examples: Analyzing the root cause of a bug, evaluating different API design choices, "
                       "planning refactoring steps, or brainstorming optimization strategies."
    }
    return {
        "name": "think",
        "description": description,
        "inputSchema": {
            "type": "object",
            "properties": {"thought": thought_schema},
            "required": ["thought"]
        }
    }
217
+
218
+
219
async def handle_think(arguments: dict) -> List[TextContent]:
    """Record a reasoning step as markdown; makes no repository changes."""
    thought = arguments.get("thought")
    if not thought:
        raise ValueError("Thought must be provided")

    # Wrap the raw thought in a markdown document with an explanatory footer.
    formatted_thought = f"""# Thought Process

{thought}

---
*Note: This is a thinking tool used for reasoning and brainstorming. No changes were made to the repository.*
"""

    return [TextContent(type="text", text=formatted_thought)]
+ )]