quantalogic 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68) hide show
  1. quantalogic/__init__.py +20 -0
  2. quantalogic/agent.py +638 -0
  3. quantalogic/agent_config.py +138 -0
  4. quantalogic/coding_agent.py +83 -0
  5. quantalogic/event_emitter.py +223 -0
  6. quantalogic/generative_model.py +226 -0
  7. quantalogic/interactive_text_editor.py +190 -0
  8. quantalogic/main.py +185 -0
  9. quantalogic/memory.py +217 -0
  10. quantalogic/model_names.py +19 -0
  11. quantalogic/print_event.py +66 -0
  12. quantalogic/prompts.py +99 -0
  13. quantalogic/server/__init__.py +3 -0
  14. quantalogic/server/agent_server.py +633 -0
  15. quantalogic/server/models.py +60 -0
  16. quantalogic/server/routes.py +117 -0
  17. quantalogic/server/state.py +199 -0
  18. quantalogic/server/static/js/event_visualizer.js +430 -0
  19. quantalogic/server/static/js/quantalogic.js +571 -0
  20. quantalogic/server/templates/index.html +134 -0
  21. quantalogic/tool_manager.py +68 -0
  22. quantalogic/tools/__init__.py +46 -0
  23. quantalogic/tools/agent_tool.py +88 -0
  24. quantalogic/tools/download_http_file_tool.py +64 -0
  25. quantalogic/tools/edit_whole_content_tool.py +70 -0
  26. quantalogic/tools/elixir_tool.py +240 -0
  27. quantalogic/tools/execute_bash_command_tool.py +116 -0
  28. quantalogic/tools/input_question_tool.py +57 -0
  29. quantalogic/tools/language_handlers/__init__.py +21 -0
  30. quantalogic/tools/language_handlers/c_handler.py +33 -0
  31. quantalogic/tools/language_handlers/cpp_handler.py +33 -0
  32. quantalogic/tools/language_handlers/go_handler.py +33 -0
  33. quantalogic/tools/language_handlers/java_handler.py +37 -0
  34. quantalogic/tools/language_handlers/javascript_handler.py +42 -0
  35. quantalogic/tools/language_handlers/python_handler.py +29 -0
  36. quantalogic/tools/language_handlers/rust_handler.py +33 -0
  37. quantalogic/tools/language_handlers/scala_handler.py +33 -0
  38. quantalogic/tools/language_handlers/typescript_handler.py +42 -0
  39. quantalogic/tools/list_directory_tool.py +123 -0
  40. quantalogic/tools/llm_tool.py +119 -0
  41. quantalogic/tools/markitdown_tool.py +105 -0
  42. quantalogic/tools/nodejs_tool.py +515 -0
  43. quantalogic/tools/python_tool.py +469 -0
  44. quantalogic/tools/read_file_block_tool.py +140 -0
  45. quantalogic/tools/read_file_tool.py +79 -0
  46. quantalogic/tools/replace_in_file_tool.py +300 -0
  47. quantalogic/tools/ripgrep_tool.py +353 -0
  48. quantalogic/tools/search_definition_names.py +419 -0
  49. quantalogic/tools/task_complete_tool.py +35 -0
  50. quantalogic/tools/tool.py +146 -0
  51. quantalogic/tools/unified_diff_tool.py +387 -0
  52. quantalogic/tools/write_file_tool.py +97 -0
  53. quantalogic/utils/__init__.py +17 -0
  54. quantalogic/utils/ask_user_validation.py +12 -0
  55. quantalogic/utils/download_http_file.py +77 -0
  56. quantalogic/utils/get_coding_environment.py +15 -0
  57. quantalogic/utils/get_environment.py +26 -0
  58. quantalogic/utils/get_quantalogic_rules_content.py +19 -0
  59. quantalogic/utils/git_ls.py +121 -0
  60. quantalogic/utils/read_file.py +54 -0
  61. quantalogic/utils/read_http_text_content.py +101 -0
  62. quantalogic/xml_parser.py +242 -0
  63. quantalogic/xml_tool_parser.py +99 -0
  64. quantalogic-0.2.0.dist-info/LICENSE +201 -0
  65. quantalogic-0.2.0.dist-info/METADATA +1034 -0
  66. quantalogic-0.2.0.dist-info/RECORD +68 -0
  67. quantalogic-0.2.0.dist-info/WHEEL +4 -0
  68. quantalogic-0.2.0.dist-info/entry_points.txt +3 -0
@@ -0,0 +1,300 @@
1
+ """Tool for replacing sections in an existing file based on SEARCH/REPLACE blocks.
2
+
3
+ This tool:
4
+ 1. Parses multiple SEARCH/REPLACE blocks from a diff-like string.
5
+ 2. Attempts exact replacement in the target file first.
6
+ 3. If exact matches fail, attempts a similarity-based match by comparing the
7
+ search string to every substring of the file content of matching length.
8
+ 4. Replaces (or deletes if replace block is empty) the best-scoring substring
9
+ if it meets the specified similarity threshold.
10
+ 5. Tracks changes to avoid overlapping replacements.
11
+ 6. Writes the modified file content back to disk if changes are made.
12
+ """
13
+
14
+ import difflib
15
+ import os
16
+ from typing import List, Optional, Tuple
17
+
18
+ from loguru import logger
19
+ from pydantic import BaseModel, Field, ValidationError
20
+
21
+ from quantalogic.tools.tool import Tool, ToolArgument
22
+
23
+
24
class SearchReplaceBlock(BaseModel):
    """One parsed SEARCH/REPLACE pair from a diff-like input string.

    Attributes:
        search (str):
            Exact text expected to occur in the target file, whitespace included.
        replace (str):
            Text that takes the place of `search`; an empty string means the
            matched text is deleted.
        similarity (float | None):
            Populated only when the match was approximate rather than exact,
            holding the similarity ratio that was accepted.
    """

    search: str = Field(
        ...,
        description="Exact content to search for in the file.",
        example="def old_function():\n pass",
    )
    replace: str = Field(
        ...,
        description="Content that replaces the `search`. Can be empty to delete the searched content.",
        example="def new_function():\n print('Hello, World!')",
    )
    similarity: Optional[float] = Field(
        None,
        description="Similarity ratio when non-exact match is made.",
    )

    @classmethod
    def from_block(cls, search: str, replace: str) -> "SearchReplaceBlock":
        """Alternate constructor building a block from raw search/replace text."""
        fields = {"search": search, "replace": replace}
        return cls(**fields)
55
+
56
+
57
class ReplaceInFileTool(Tool):
    """Tool for replacing sections in an existing file based on SEARCH/REPLACE blocks.

    Matching strategy per block: try an exact substring match first; if that
    fails, fall back to a line-window similarity search (see
    `find_similar_match`) gated by `SIMILARITY_THRESHOLD`.
    """

    # Identity and description exposed to the agent framework.
    name: str = "replace_in_file_tool"
    description: str = (
        "Updates sections of content in an existing file using SEARCH/REPLACE blocks. "
        "If exact matches are not found, the tool attempts to find similar sections based on similarity. "
        "Returns the updated content or an error."
    )
    # Writing to disk is destructive, so user validation is required first.
    need_validation: bool = True

    # Adjust this threshold to allow more or less approximate matching
    SIMILARITY_THRESHOLD: float = 0.85

    arguments: list[ToolArgument] = [
        ToolArgument(
            name="path",
            arg_type="string",
            description=(
                "The path of the file to modify (relative to the current working "
                "directory). Using an absolute path is recommended."
            ),
            required=True,
            example="./src/main.py",
        ),
        ToolArgument(
            name="diff",
            arg_type="string",
            description=(
                "Define one or more SEARCH/REPLACE blocks to specify the exact changes to be made in the code. "
                "Each block must follow this precise format:\n"
                "```\n"
                "<<<<<<< SEARCH\n"
                "[exact content to find, characters must match EXACTLY including whitespace, indentation, line endings]\n"
                "=======\n"
                "[new content to replace with]\n"
                ">>>>>>> REPLACE\n"
                "```\n\n"
                "### Critical Rules:\n"
                "1. **Exact Matching**:\n"
                " - The SEARCH content must match the corresponding section in the file exactly:\n"
                " - This includes all characters, whitespace, indentation, and line endings.\n"
                " - Ensure all comments, docstrings, and other relevant text are included.\n"
                "2. **Replacement Mechanics**:\n"
                " - Each SEARCH/REPLACE block will only replace the first occurrence found.\n"
                " - To make multiple changes, create separate unique SEARCH/REPLACE blocks for each.\n"
                " - Include just enough context in each SEARCH section to uniquely identify the lines needing change.\n"
                "3. **Conciseness and Clarity**:\n"
                " - Break larger SEARCH/REPLACE blocks into smaller segments that each modify a specific part of the file.\n"
                " - Include only the lines that change and a few surrounding lines if necessary for uniqueness.\n"
                " - Ensure each line is complete; do not truncate lines mid-way to prevent matching failures.\n"
                "4. **Special Operations**:\n"
                " - An empty SEARCH/REPLACE block will result in the deletion of the corresponding line.\n"
                " - If a block is missing entirely, the file will remain unchanged.\n"
                " - To move code: Use two blocks—one to delete from the original location and another to insert at the new location.\n"
                " - To delete code: Use an empty REPLACE section.\n"
            ),
            required=True,
            example=(
                "<<<<<<< SEARCH\n"
                "def old_function():\n"
                " pass\n"
                "=======\n"
                "def new_function():\n"
                " print('Hello, World!')\n"
                ">>>>>>> REPLACE\n"
            ),
        ),
    ]

    def parse_diff(self, diff: str) -> list[SearchReplaceBlock]:
        """Parses the diff string into a list of SearchReplaceBlock instances.

        Args:
            diff: Text containing one or more `<<<<<<< SEARCH` / `=======` /
                `>>>>>>> REPLACE` delimited blocks.

        Returns:
            The parsed blocks, in the order they appear in `diff`.

        Raises:
            ValueError: If `diff` is empty, a marker is missing, a block fails
                pydantic validation, or no blocks are found at all.
        """
        if not diff or not diff.strip():
            raise ValueError("Empty or invalid diff string provided")

        blocks: list[SearchReplaceBlock] = []
        lines = diff.splitlines()
        idx = 0

        while idx < len(lines):
            # Markers are matched on stripped lines, so surrounding whitespace
            # on the marker line itself is tolerated.
            line = lines[idx].strip()
            if line == "<<<<<<< SEARCH":
                search_lines = []
                idx += 1

                # Collect SEARCH content up to (not including) the '=======' separator.
                while idx < len(lines) and lines[idx].strip() != "=======":
                    search_lines.append(lines[idx])
                    idx += 1

                if idx >= len(lines):
                    raise ValueError("Invalid diff format: Missing '=======' marker")

                replace_lines = []
                idx += 1

                # Collect REPLACE content up to the closing marker.
                while idx < len(lines) and lines[idx].strip() != ">>>>>>> REPLACE":
                    replace_lines.append(lines[idx])
                    idx += 1

                if idx >= len(lines):
                    raise ValueError("Invalid diff format: Missing '>>>>>>> REPLACE' marker")

                # Trailing whitespace is stripped from both sections; leading
                # whitespace (indentation) is preserved.
                search_content = "\n".join(search_lines).rstrip()
                replace_content = "\n".join(replace_lines).rstrip()

                try:
                    block = SearchReplaceBlock.from_block(search=search_content, replace=replace_content)
                    blocks.append(block)
                except ValidationError as ve:
                    raise ValueError(f"Invalid block format: {ve}")

            idx += 1

        if not blocks:
            raise ValueError("No valid SEARCH/REPLACE blocks found in the diff")

        return blocks

    def execute(self, path: str, diff: str) -> str:
        """Replaces sections in a file based on SEARCH/REPLACE blocks with similarity-based fallback.

        Args:
            path: File to modify; `~` is expanded and relative paths are made absolute.
            diff: SEARCH/REPLACE block text (see `parse_diff` for the format).

        Returns:
            A human-readable status string: a per-block success summary, a
            "no changes needed" notice, or an "Error: ..." message. Errors are
            reported as return values rather than raised to the caller.
        """
        if not path:
            return "Error: File path cannot be empty"

        if not diff:
            return "Error: Diff content cannot be empty"

        try:
            path = os.path.expanduser(path) if path.startswith("~") else path
            path = os.path.abspath(path) if not os.path.isabs(path) else path

            if not os.path.isfile(path):
                return f"Error: File not found: '{path}'"

            blocks = self.parse_diff(diff)

            try:
                with open(path, encoding="utf-8") as file:
                    content = file.read()
            except UnicodeDecodeError:
                return f"Error: File must be UTF-8 encoded: '{path}'"
            except Exception as e:
                return f"Error: Failed to read file '{path}': {str(e) or 'Unknown error'}"

            original_content = content
            # (start, end) spans of prior replacements used for overlap checks.
            changes: List[Tuple[int, int]] = []

            for idx, block in enumerate(blocks, 1):
                # An empty SEARCH section means "append": REPLACE content is
                # added at the end of the file; with both sections empty the
                # block is a no-op.
                if not block.search:
                    if block.replace:
                        content += f"\n{block.replace}"
                        logger.info(f"Block {idx}: Appended content")
                    continue

                match_found = False
                # Pass 1: exact substring match (first occurrence only).
                if block.search in content:
                    start = content.find(block.search)
                    end = start + len(block.search)
                    if not self._is_overlapping(changes, start, end):
                        if block.replace:
                            content = f"{content[:start]}{block.replace}{content[end:]}"
                        else:
                            content = f"{content[:start]}{content[end:]}"
                        # NOTE(review): spans are recorded against the content
                        # as it exists at the moment of this replacement, but
                        # later blocks are located in the already-mutated
                        # string — the overlap check compares positions from
                        # different revisions of the text. Deletions record an
                        # empty (start, start) span. Confirm this bookkeeping
                        # is intended for multi-block diffs.
                        changes.append((start, start + len(block.replace) if block.replace else start))
                        match_found = True
                        logger.info(f"Block {idx}: Exact match {'replaced' if block.replace else 'deleted'}")

                # Pass 2: similarity fallback when no (usable) exact match.
                if not match_found:
                    similarity, matched_str = self.find_similar_match(block.search, content)
                    if similarity >= self.SIMILARITY_THRESHOLD and matched_str:
                        start = content.find(matched_str)
                        end = start + len(matched_str)
                        if not self._is_overlapping(changes, start, end):
                            block.similarity = similarity
                            if block.replace:
                                content = f"{content[:start]}{block.replace}{content[end:]}"
                            else:
                                content = f"{content[:start]}{content[end:]}"
                            changes.append((start, start + len(block.replace) if block.replace else start))
                            logger.info(
                                f"Block {idx}: Similar match (similarity={similarity:.1%}) "
                                f"{'replaced' if block.replace else 'deleted'}"
                            )
                            match_found = True

                if not match_found:
                    # `similarity` here comes from the fallback attempt above.
                    return f"Error: No matching content found for block {idx}. " f"Best similarity: {similarity:.1%}"

            if content == original_content:
                return f"No changes needed in '{path}'"

            try:
                with open(path, "w", encoding="utf-8") as file:
                    file.write(content)
            except Exception as e:
                return f"Error: Failed to write changes to '{path}': {str(e) or 'Unknown error'}"

            message = [f"Successfully modified '{path}'"]
            for idx, block in enumerate(blocks, 1):
                # `similarity` stays None for exact matches (set only in pass 2).
                status = "Exact match" if block.similarity is None else f"Similar match ({block.similarity:.1%})"
                message.append(f"- Block {idx}: {status}")

            return "\n".join(message)

        except (OSError, ValueError) as e:
            error_msg = str(e)
            logger.error(error_msg)
            return f"Error: {error_msg or 'Unknown error occurred'}"
        except Exception as e:
            error_msg = str(e)
            logger.exception("Unexpected error")
            return f"Error: Unexpected error occurred - {error_msg or 'Unknown error'}"

    def find_similar_match(self, search: str, content: str) -> Tuple[float, str]:
        """Finds the most similar substring in content compared to search.

        Slides a window of len(search-in-lines) over the content's lines and
        scores each candidate with difflib.SequenceMatcher — O(lines) ratio
        computations, each over the full search text.

        Args:
            search: Text to approximate-match.
            content: Full file content to scan.

        Returns:
            (best_ratio, best_candidate); (0.0, "") when either input is empty
            or the search spans more lines than the content has.
        """
        if not search or not content:
            return 0.0, ""

        search_lines = search.splitlines()
        content_lines = content.splitlines()

        if len(search_lines) > len(content_lines):
            return 0.0, ""

        max_similarity = 0.0
        best_match = ""

        for i in range(len(content_lines) - len(search_lines) + 1):
            candidate = "\n".join(content_lines[i : i + len(search_lines)])
            similarity = difflib.SequenceMatcher(None, search, candidate).ratio()

            if similarity > max_similarity:
                max_similarity = similarity
                best_match = candidate

        return max_similarity, best_match

    def _is_overlapping(self, changes: List[Tuple[int, int]], start: int, end: int) -> bool:
        """Checks if the half-open range [start, end) intersects any recorded change span."""
        return any(not (end <= change_start or start >= change_end) for change_start, change_end in changes)
296
+
297
+
298
# When run as a script, print the tool's self-describing markdown.
if __name__ == "__main__":
    print(ReplaceInFileTool().to_markdown())
@@ -0,0 +1,353 @@
1
+ """A tool to search for text blocks in files using ripgrep."""
2
+
3
+ import json
4
+ import os
5
+ import subprocess
6
+ from pathlib import Path
7
+ from typing import Any, Dict, List, Optional
8
+
9
+ from loguru import logger
10
+ from pathspec import PathSpec
11
+ from pydantic import ValidationError
12
+
13
+ from quantalogic.tools.tool import Tool, ToolArgument
14
+
15
+ MAX_LINE_LENGTH = 120 # Maximum length for each line before truncation
16
+
17
+
18
+ class RipgrepTool(Tool):
19
+ """Search files using ripgrep with regex and file filters."""
20
+
21
+ name: str = "ripgrep_search_tool"
22
+ description: str = "Search files using ripgrep with regex and file filters."
23
+
24
+ arguments: list = [
25
+ ToolArgument(
26
+ name="cwd",
27
+ arg_type="string",
28
+ description="Base path for relative searches",
29
+ required=False,
30
+ default=None,
31
+ ),
32
+ ToolArgument(
33
+ name="directory_path",
34
+ arg_type="string",
35
+ description="The directory path to search in.",
36
+ required=True,
37
+ ),
38
+ ToolArgument(
39
+ name="regex_rust_syntax",
40
+ arg_type="string",
41
+ description="The regex pattern to search for (Regex must be in Rust syntax).",
42
+ required=True,
43
+ ),
44
+ ToolArgument(
45
+ name="file_pattern",
46
+ arg_type="string",
47
+ description="Optional glob pattern to filter files.",
48
+ required=False,
49
+ default="**/*",
50
+ ),
51
+ ToolArgument(
52
+ name="context_lines",
53
+ arg_type="string",
54
+ description="Number of context lines to include before and after matches.",
55
+ required=True,
56
+ default="4",
57
+ ),
58
+ ]
59
+
60
+ model_config = {"extra": "allow"}
61
+
62
+ def _load_gitignore_spec(self, path: Path) -> PathSpec:
63
+ """Load .gitignore patterns from directory and all parent directories."""
64
+ from pathspec import PathSpec
65
+ from pathspec.patterns import GitWildMatchPattern
66
+
67
+ ignore_patterns = []
68
+ current = path.absolute()
69
+
70
+ # Traverse up the directory tree
71
+ while current != current.parent: # Stop at root
72
+ gitignore_path = current / ".gitignore"
73
+ if gitignore_path.exists():
74
+ with open(gitignore_path) as f:
75
+ # Filter out empty lines and comments
76
+ patterns = [line.strip() for line in f.readlines() if line.strip() and not line.startswith("#")]
77
+ ignore_patterns.extend(patterns)
78
+ current = current.parent
79
+
80
+ return PathSpec.from_lines(GitWildMatchPattern, ignore_patterns)
81
+
82
+ def execute(
83
+ self,
84
+ cwd: Optional[str] = None,
85
+ directory_path: str = ".",
86
+ regex_rust_syntax: str = "search",
87
+ file_pattern: str = "**/*",
88
+ context_lines: str = "1",
89
+ ) -> str:
90
+ """Execute the ripgrep search and return formatted results.
91
+
92
+ Args:
93
+ cwd (Optional[str]): The current working directory for relative path calculation.
94
+ directory_path (str): The directory path to search in.
95
+ regex_rust_syntax (str): The regex pattern to search for (Rust syntax).
96
+ file_pattern (str): Optional glob pattern to filter files.
97
+ context_lines (str): Number of context lines to include before and after matches.
98
+
99
+ Returns:
100
+ str: Formatted search results with context.
101
+
102
+ Raises:
103
+ ValueError: If the directory path is invalid.
104
+ RuntimeError: If ripgrep is not found or fails to execute.
105
+ """
106
+ # Validate and normalize the directory path
107
+ directory_path = str(Path(directory_path).resolve())
108
+ if not os.path.isdir(directory_path):
109
+ raise ValueError(f"Directory not found: {directory_path}")
110
+
111
+ # Use current working directory if not specified
112
+ cwd = str(Path(cwd or directory_path).resolve())
113
+ rg_path = self._find_rg_binary()
114
+ if not rg_path:
115
+ raise RuntimeError("Could not find ripgrep binary.")
116
+
117
+ # Load .gitignore patterns
118
+ gitignore_spec = self._load_gitignore_spec(Path(directory_path))
119
+
120
+ args = [
121
+ "--json", # Output in JSON format for easier parsing
122
+ "-e",
123
+ regex_rust_syntax, # Regex pattern to search for
124
+ "--glob",
125
+ file_pattern, # File pattern to filter files
126
+ "--context",
127
+ context_lines, # Include context lines before and after matches
128
+ directory_path, # Directory to search in
129
+ ]
130
+
131
+ # Find files matching the pattern
132
+ files = list(Path(directory_path).rglob(file_pattern))
133
+ filtered_files = [str(f) for f in files if f.is_file() and not gitignore_spec.match_file(f)]
134
+
135
+ if not filtered_files:
136
+ return "No files matching the pattern (after .gitignore filtering)"
137
+
138
+ try:
139
+ logger.info(f"Executing ripgrep with args: {args}")
140
+ # Add filtered files to ripgrep command
141
+ args.extend(filtered_files)
142
+ output = subprocess.check_output([rg_path] + args, text=True, cwd=cwd)
143
+ except subprocess.CalledProcessError as e:
144
+ if e.returncode == 1:
145
+ return "No results found."
146
+ elif e.returncode == 2:
147
+ return f"Invalid regex pattern: {regex_rust_syntax}"
148
+ raise RuntimeError(f"Ripgrep process error (code {e.returncode}): {e}")
149
+
150
+ results = self._parse_rg_output(output, cwd)
151
+ return self._format_results(results, cwd)
152
+
153
+ def _find_rg_binary(self) -> Optional[str]:
154
+ """Locate the ripgrep binary in common installation paths.
155
+
156
+ Returns:
157
+ Optional[str]: Path to the ripgrep binary, or None if not found.
158
+ """
159
+ bin_name = "rg.exe" if os.name == "nt" else "rg"
160
+
161
+ # Check environment variable first
162
+ env_path = os.environ.get("RIPGREP_PATH")
163
+ if env_path and Path(env_path).exists():
164
+ return env_path
165
+
166
+ # Common system paths
167
+ system_paths = []
168
+ if os.name == "nt": # Windows
169
+ system_paths.extend(
170
+ [
171
+ Path(os.environ.get("ProgramFiles", "C:\\Program Files"), "ripgrep", bin_name),
172
+ Path(os.environ.get("ProgramFiles(x86)", "C:\\Program Files (x86)"), "ripgrep", bin_name),
173
+ ]
174
+ )
175
+ else: # Unix-like systems
176
+ system_paths.extend(
177
+ [
178
+ Path("/usr/local/bin", bin_name),
179
+ Path("/usr/bin", bin_name),
180
+ Path("/opt/local/bin", bin_name),
181
+ Path.home() / ".cargo" / "bin" / bin_name, # Common Rust installation path
182
+ ]
183
+ )
184
+
185
+ # VSCode/Node.js paths
186
+ node_paths = [
187
+ Path("node_modules", "@vscode", "ripgrep", "bin", bin_name),
188
+ Path("node_modules", "vscode-ripgrep", "bin", bin_name),
189
+ Path("node_modules.asar.unpacked", "vscode-ripgrep", "bin", bin_name),
190
+ Path("node_modules.asar.unpacked", "@vscode", "ripgrep", "bin", bin_name),
191
+ ]
192
+
193
+ # Check all possible paths
194
+ for path in system_paths + node_paths:
195
+ full_path = Path(__file__).parent.parent / path if str(path).startswith("node_modules") else path
196
+ if full_path.exists():
197
+ logger.info(f"Found ripgrep at: {full_path}")
198
+ return str(full_path)
199
+
200
+ # Check system PATH using which/where
201
+ try:
202
+ command = "where" if os.name == "nt" else "which"
203
+ rg_path = subprocess.check_output([command, bin_name], text=True).strip()
204
+ if rg_path:
205
+ logger.info(f"Found ripgrep in PATH at: {rg_path}")
206
+ return rg_path
207
+ except subprocess.CalledProcessError:
208
+ logger.debug("Ripgrep not found in system PATH")
209
+
210
+ logger.warning("Could not locate ripgrep binary")
211
+ return None
212
+
213
+ def _parse_rg_output(self, output: str, cwd: str) -> List[Dict[str, Any]]:
214
+ """Parse the JSON output from ripgrep into structured results.
215
+
216
+ Args:
217
+ output (str): The raw JSON output from ripgrep.
218
+ cwd (str): The current working directory for relative path calculation.
219
+
220
+ Returns:
221
+ List[Dict[str, Any]]: A list of parsed search results.
222
+
223
+ Raises:
224
+ ValueError: If the JSON data structure is invalid or missing required fields.
225
+ """
226
+ results = []
227
+ current_result = None
228
+
229
+ for line in output.strip().split("\n"):
230
+ if not line.strip():
231
+ continue
232
+
233
+ try:
234
+ data = json.loads(line)
235
+ if not isinstance(data, dict):
236
+ logger.warning(f"Skipping non-dict JSON line: {line}")
237
+ continue
238
+
239
+ if data.get("type") == "match":
240
+ try:
241
+ # Validate required fields in match data
242
+ match_data = data["data"]
243
+ if not all(key in match_data for key in ["path", "line_number", "submatches", "lines"]):
244
+ raise ValueError("Missing required fields in match data")
245
+
246
+ if current_result:
247
+ results.append(current_result)
248
+ current_result = {
249
+ "file": os.path.relpath(match_data["path"]["text"], cwd),
250
+ "line": match_data["line_number"],
251
+ "column": match_data["submatches"][0]["start"],
252
+ "match": match_data["lines"]["text"].strip(),
253
+ "before_context": [],
254
+ "after_context": [],
255
+ }
256
+ except (KeyError, ValueError) as e:
257
+ logger.error(f"Invalid match data structure: {e}\nLine: {line}")
258
+ continue
259
+
260
+ elif data.get("type") == "context" and current_result:
261
+ try:
262
+ context_data = data["data"]
263
+ if not all(key in context_data for key in ["line_number", "lines"]):
264
+ raise ValueError("Missing required fields in context data")
265
+
266
+ if context_data["line_number"] < current_result["line"]:
267
+ current_result["before_context"].append(context_data["lines"]["text"].strip())
268
+ else:
269
+ current_result["after_context"].append(context_data["lines"]["text"].strip())
270
+ except (KeyError, ValueError) as e:
271
+ logger.error(f"Invalid context data structure: {e}\nLine: {line}")
272
+ continue
273
+
274
+ except json.JSONDecodeError as e:
275
+ logger.warning(f"Failed to parse JSON line: {line}\nError: {e}")
276
+ continue
277
+ except Exception as e:
278
+ logger.error(f"Unexpected error parsing line: {line}\nError: {e}")
279
+ continue
280
+
281
+ if current_result:
282
+ results.append(current_result)
283
+ return results
284
+
285
+ def _format_results(self, results: List[Dict[str, Any]], cwd: str) -> str:
286
+ """Format the parsed search results into a readable string.
287
+
288
+ Args:
289
+ results (List[Dict[str, Any]]): The parsed search results.
290
+ cwd (str): The current working directory for relative path calculation.
291
+
292
+ Returns:
293
+ str: Formatted search results with context and line numbers.
294
+ """
295
+ if not results:
296
+ return "No results found."
297
+
298
+ formatted_output = []
299
+ grouped_results: Dict[str, List[Dict[str, Any]]] = {}
300
+
301
+ # Group results by file
302
+ for result in results:
303
+ if result["file"] not in grouped_results:
304
+ grouped_results[result["file"]] = []
305
+ grouped_results[result["file"]].append(result)
306
+
307
+ # Format each group
308
+ for file, file_results in grouped_results.items():
309
+ formatted_output.append(f"\nšŸ“„ File: {file}")
310
+ formatted_output.append("=" * (len(file) + 8)) # Adjust divider length for "File: " prefix
311
+
312
+ for result in file_results:
313
+ # Add context before the match with line numbers
314
+ for i, line in enumerate(
315
+ result["before_context"], start=result["line"] - len(result["before_context"])
316
+ ):
317
+ truncated_line = (line[:MAX_LINE_LENGTH] + "...") if len(line) > MAX_LINE_LENGTH else line
318
+ formatted_output.append(f"{i:4d} │ {truncated_line}")
319
+
320
+ # Highlight the match line with line number
321
+ truncated_match = (
322
+ (result["match"][:MAX_LINE_LENGTH] + "...")
323
+ if len(result["match"]) > MAX_LINE_LENGTH
324
+ else result["match"]
325
+ )
326
+ formatted_output.append(f"{result['line']:4d} ā–¶ {truncated_match}") # Use ā–¶ to highlight match
327
+
328
+ # Add context after the match with line numbers
329
+ for i, line in enumerate(result["after_context"], start=result["line"] + 1):
330
+ truncated_line = (line[:MAX_LINE_LENGTH] + "...") if len(line) > MAX_LINE_LENGTH else line
331
+ formatted_output.append(f"{i:4d} │ {truncated_line}")
332
+
333
+ formatted_output.append("─" * 80) # Add a visual separator between matches
334
+
335
+ if not formatted_output:
336
+ return "No results found."
337
+
338
+ # Add summary of results
339
+ total_matches = sum(len(matches) for matches in grouped_results.values())
340
+ formatted_output.insert(0, f"šŸ” Found {total_matches} matches across {len(grouped_results)} files\n")
341
+
342
+ return "\n".join(formatted_output).strip()
343
+
344
+
345
# Example usage:
if __name__ == "__main__":
    try:
        tool = RipgrepTool()
        # Bug fix: the parameter is named `regex_rust_syntax`, not `regex`;
        # the previous keyword raised TypeError when this script was run.
        print(tool.execute(directory_path=".", regex_rust_syntax="search", file_pattern="**/*.py", context_lines="2"))
    except ValidationError as e:
        logger.error(f"Validation error: {e}")
    except RuntimeError as e:
        logger.error(f"Runtime error: {e}")