tree-sitter-analyzer 1.2.3__py3-none-any.whl → 1.2.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of tree-sitter-analyzer might be problematic; see the registry's advisory page for more details.

@@ -0,0 +1,269 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ list_files MCP Tool (fd wrapper)
4
+
5
+ Safely list files/directories based on name patterns and constraints, using fd.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ import time
11
+ from pathlib import Path
12
+ from typing import Any
13
+
14
+ from ..utils.error_handler import handle_mcp_errors
15
+ from . import fd_rg_utils
16
+ from .base_tool import BaseMCPTool
17
+
18
+
19
class ListFilesTool(BaseMCPTool):
    """MCP tool that wraps fd to list files with safety limits.

    Every root directory is resolved and validated against the project
    boundary before fd is invoked, and the number of returned entries is
    clamped to a hard cap so a single call cannot overwhelm the client.
    """

    def get_tool_definition(self) -> dict[str, Any]:
        """Return the MCP tool schema (name, description, input schema)."""
        return {
            "name": "list_files",
            "description": "List files and directories using fd with advanced filtering options. Supports glob patterns, file types, size filters, and more. Returns file paths with metadata or just counts.",
            "inputSchema": {
                "type": "object",
                "properties": {
                    "roots": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "Directory paths to search in. Must be within project boundaries for security. Example: ['.', 'src/', '/path/to/dir']",
                    },
                    "pattern": {
                        "type": "string",
                        "description": "Search pattern for file/directory names. Use with 'glob' for shell patterns or regex. Example: '*.py', 'test_*', 'main.js'",
                    },
                    "glob": {
                        "type": "boolean",
                        "default": False,
                        "description": "Treat pattern as glob (shell wildcard) instead of regex. True for '*.py', False for '.*\\.py$'",
                    },
                    "types": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "File types to include. Values: 'f'=files, 'd'=directories, 'l'=symlinks, 'x'=executable, 'e'=empty. Example: ['f'] for files only",
                    },
                    "extensions": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "File extensions to include (without dots). Example: ['py', 'js', 'md'] for Python, JavaScript, and Markdown files",
                    },
                    "exclude": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "Patterns to exclude from results. Example: ['*.tmp', '__pycache__', 'node_modules'] to skip temporary and cache files",
                    },
                    "depth": {
                        "type": "integer",
                        "description": "Maximum directory depth to search. 1=current level only, 2=one level deep, etc. Useful to avoid deep recursion",
                    },
                    "follow_symlinks": {
                        "type": "boolean",
                        "default": False,
                        "description": "Follow symbolic links during search. False=skip symlinks (safer), True=follow them (may cause loops)",
                    },
                    "hidden": {
                        "type": "boolean",
                        "default": False,
                        "description": "Include hidden files/directories (starting with dot). False=skip .git, .env, True=include all",
                    },
                    "no_ignore": {
                        "type": "boolean",
                        "default": False,
                        "description": "Ignore .gitignore and similar files. False=respect ignore files, True=search everything",
                    },
                    "size": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "File size filters. Format: '+10M'=larger than 10MB, '-1K'=smaller than 1KB, '100B'=exactly 100 bytes. Units: B, K, M, G",
                    },
                    "changed_within": {
                        "type": "string",
                        "description": "Files modified within timeframe. Format: '1d'=1 day, '2h'=2 hours, '30m'=30 minutes, '1w'=1 week",
                    },
                    "changed_before": {
                        "type": "string",
                        "description": "Files modified before timeframe. Same format as changed_within. Useful for finding old files",
                    },
                    "full_path_match": {
                        "type": "boolean",
                        "default": False,
                        "description": "Match pattern against full path instead of just filename. True for 'src/main.py', False for 'main.py'",
                    },
                    "absolute": {
                        "type": "boolean",
                        "default": True,
                        "description": "Return absolute paths. True='/full/path/file.py', False='./file.py'. Absolute paths are more reliable",
                    },
                    "limit": {
                        "type": "integer",
                        "description": "Maximum number of results to return. Default 2000, max 10000. Use to prevent overwhelming output",
                    },
                    "count_only": {
                        "type": "boolean",
                        "default": False,
                        "description": "Return only the total count of matching files instead of file details. Useful for quick statistics",
                    },
                },
                "required": ["roots"],
                "additionalProperties": False,
            },
        }

    def _validate_roots(self, roots: list[str]) -> list[str]:
        """Resolve each root and enforce the project security boundary.

        Args:
            roots: Raw root paths supplied by the caller.

        Returns:
            Resolved absolute paths, in input order.

        Raises:
            ValueError: if the list is empty/mistyped or a root fails
                directory validation.
        """
        if not roots or not isinstance(roots, list):
            raise ValueError("roots must be a non-empty array of strings")
        validated: list[str] = []
        for r in roots:
            if not isinstance(r, str) or not r.strip():
                raise ValueError("root entries must be non-empty strings")
            # Resolve and enforce boundary
            resolved = self.path_resolver.resolve(r)
            is_valid, error = self.security_validator.validate_directory_path(
                resolved, must_exist=True
            )
            if not is_valid:
                raise ValueError(f"Invalid root '{r}': {error}")
            validated.append(resolved)
        return validated

    def validate_arguments(self, arguments: dict[str, Any]) -> bool:
        """Type-check arguments against the declared input schema.

        Raises:
            ValueError: on a missing required argument or a type mismatch.
        """
        if "roots" not in arguments:
            raise ValueError("roots is required")
        roots = arguments["roots"]
        if not isinstance(roots, list):
            raise ValueError("roots must be an array")
        # Basic type checks for optional fields
        for key in [
            "pattern",
            "changed_within",
            "changed_before",
        ]:
            if key in arguments and not isinstance(arguments[key], str):
                raise ValueError(f"{key} must be a string")
        # FIX: "count_only" is declared as a boolean in the schema but was
        # previously missing from this list, so non-bool values slipped through.
        for key in [
            "glob",
            "follow_symlinks",
            "hidden",
            "no_ignore",
            "full_path_match",
            "absolute",
            "count_only",
        ]:
            if key in arguments and not isinstance(arguments[key], bool):
                raise ValueError(f"{key} must be a boolean")
        if "depth" in arguments and not isinstance(arguments["depth"], int):
            raise ValueError("depth must be an integer")
        if "limit" in arguments and not isinstance(arguments["limit"], int):
            raise ValueError("limit must be an integer")
        for arr in ["types", "extensions", "exclude", "size"]:
            if arr in arguments and not (
                isinstance(arguments[arr], list)
                and all(isinstance(x, str) for x in arguments[arr])
            ):
                raise ValueError(f"{arr} must be an array of strings")
        return True

    @handle_mcp_errors("list_files")
    async def execute(self, arguments: dict[str, Any]) -> dict[str, Any]:
        """Run fd with the given arguments and return listing results.

        Returns:
            On success either a count-only payload or a payload with
            per-entry metadata (path, is_dir, size_bytes, mtime, ext);
            on fd failure a ``{"success": False, ...}`` payload.
        """
        self.validate_arguments(arguments)
        roots = self._validate_roots(arguments["roots"])  # normalized absolutes

        # Clamp the requested limit into [default, hard cap].
        limit = fd_rg_utils.clamp_int(
            arguments.get("limit"),
            fd_rg_utils.DEFAULT_RESULTS_LIMIT,
            fd_rg_utils.MAX_RESULTS_HARD_CAP,
        )

        cmd = fd_rg_utils.build_fd_command(
            pattern=arguments.get("pattern"),
            glob=bool(arguments.get("glob", False)),
            types=arguments.get("types"),
            extensions=arguments.get("extensions"),
            exclude=arguments.get("exclude"),
            depth=arguments.get("depth"),
            follow_symlinks=bool(arguments.get("follow_symlinks", False)),
            hidden=bool(arguments.get("hidden", False)),
            no_ignore=bool(arguments.get("no_ignore", False)),
            size=arguments.get("size"),
            changed_within=arguments.get("changed_within"),
            changed_before=arguments.get("changed_before"),
            full_path_match=bool(arguments.get("full_path_match", False)),
            absolute=True,  # unify output to absolute paths
            limit=limit,
            roots=roots,
        )

        # Use fd default path format (one per line). We'll determine is_dir and ext via Path
        started = time.time()
        rc, out, err = await fd_rg_utils.run_command_capture(cmd)
        elapsed_ms = int((time.time() - started) * 1000)

        if rc != 0:
            message = err.decode("utf-8", errors="replace").strip() or "fd failed"
            return {"success": False, "error": message, "returncode": rc}

        lines = [
            line.strip()
            for line in out.decode("utf-8", errors="replace").splitlines()
            if line.strip()
        ]

        # Check if count_only mode is requested
        if arguments.get("count_only", False):
            total_count = len(lines)
            # Apply hard cap for counting as well
            if total_count > fd_rg_utils.MAX_RESULTS_HARD_CAP:
                total_count = fd_rg_utils.MAX_RESULTS_HARD_CAP
                truncated = True
            else:
                truncated = False

            return {
                "success": True,
                "count_only": True,
                "total_count": total_count,
                "truncated": truncated,
                "elapsed_ms": elapsed_ms,
            }

        # Truncate defensively even if fd didn't
        truncated = False
        if len(lines) > fd_rg_utils.MAX_RESULTS_HARD_CAP:
            lines = lines[: fd_rg_utils.MAX_RESULTS_HARD_CAP]
            truncated = True

        results: list[dict[str, Any]] = []
        for p in lines:
            try:
                path_obj = Path(p)
                is_dir = path_obj.is_dir()
                ext = path_obj.suffix[1:] if path_obj.suffix else None
                size_bytes = None
                mtime = None
                if not is_dir:
                    # FIX: previously checked exists() and then called stat()
                    # twice — three syscalls and a TOCTOU race. One EAFP stat()
                    # gives both values; a vanished file just keeps the Nones.
                    try:
                        st = path_obj.stat()
                        size_bytes = st.st_size
                        mtime = int(st.st_mtime)
                    except (OSError, ValueError):  # nosec B110
                        pass
                results.append(
                    {
                        "path": str(path_obj.resolve()),
                        "is_dir": is_dir,
                        "size_bytes": size_bytes,
                        "mtime": mtime,
                        "ext": ext,
                    }
                )
            except (OSError, ValueError):  # nosec B112
                # Skip entries whose paths cannot be processed at all.
                continue

        return {
            "success": True,
            "count": len(results),
            "truncated": truncated,
            "elapsed_ms": elapsed_ms,
            "results": results,
        }
@@ -0,0 +1,334 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ search_content MCP Tool (ripgrep wrapper)
4
+
5
+ Search content in files under roots or an explicit file list using ripgrep --json.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ import time
11
+ from pathlib import Path
12
+ from typing import Any
13
+
14
+ from ..utils.error_handler import handle_mcp_errors
15
+ from . import fd_rg_utils
16
+ from .base_tool import BaseMCPTool
17
+
18
+
19
class SearchContentTool(BaseMCPTool):
    """MCP tool that wraps ripgrep to search content with safety limits.

    Targets are either directories (``roots``) or explicit files (``files``);
    both are resolved and validated against the project boundary before
    ripgrep runs, and per-file match counts are clamped to a hard cap.
    """

    def get_tool_definition(self) -> dict[str, Any]:
        """Return the MCP tool schema (name, description, input schema)."""
        return {
            "name": "search_content",
            "description": "Search text content inside files using ripgrep. Supports regex patterns, case sensitivity, context lines, and various output formats. Can search in directories or specific files.",
            "inputSchema": {
                "type": "object",
                "properties": {
                    "roots": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "Directory paths to search in recursively. Alternative to 'files'. Example: ['.', 'src/', 'tests/']",
                    },
                    "files": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "Specific file paths to search in. Alternative to 'roots'. Example: ['main.py', 'config.json']",
                    },
                    "query": {
                        "type": "string",
                        "description": "Text pattern to search for. Can be literal text or regex depending on settings. Example: 'function', 'class\\s+\\w+', 'TODO:'",
                    },
                    "case": {
                        "type": "string",
                        "enum": ["smart", "insensitive", "sensitive"],
                        "default": "smart",
                        "description": "Case sensitivity mode. 'smart'=case-insensitive unless uppercase letters present, 'insensitive'=always ignore case, 'sensitive'=exact case match",
                    },
                    "fixed_strings": {
                        "type": "boolean",
                        "default": False,
                        "description": "Treat query as literal string instead of regex. True for exact text matching, False for regex patterns",
                    },
                    "word": {
                        "type": "boolean",
                        "default": False,
                        "description": "Match whole words only. True finds 'test' but not 'testing', False finds both",
                    },
                    "multiline": {
                        "type": "boolean",
                        "default": False,
                        "description": "Allow patterns to match across multiple lines. Useful for finding multi-line code blocks or comments",
                    },
                    "include_globs": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "File patterns to include in search. Example: ['*.py', '*.js'] to search only Python and JavaScript files",
                    },
                    "exclude_globs": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "File patterns to exclude from search. Example: ['*.log', '__pycache__/*'] to skip log files and cache directories",
                    },
                    "follow_symlinks": {
                        "type": "boolean",
                        "default": False,
                        "description": "Follow symbolic links during search. False=safer, True=may cause infinite loops",
                    },
                    "hidden": {
                        "type": "boolean",
                        "default": False,
                        "description": "Search in hidden files (starting with dot). False=skip .git, .env files, True=search all",
                    },
                    "no_ignore": {
                        "type": "boolean",
                        "default": False,
                        "description": "Ignore .gitignore and similar ignore files. False=respect ignore rules, True=search all files",
                    },
                    "max_filesize": {
                        "type": "string",
                        "description": "Maximum file size to search. Format: '10M'=10MB, '500K'=500KB, '1G'=1GB. Prevents searching huge files",
                    },
                    "context_before": {
                        "type": "integer",
                        "description": "Number of lines to show before each match. Useful for understanding match context. Example: 3 shows 3 lines before",
                    },
                    "context_after": {
                        "type": "integer",
                        "description": "Number of lines to show after each match. Useful for understanding match context. Example: 3 shows 3 lines after",
                    },
                    "encoding": {
                        "type": "string",
                        "description": "Text encoding to assume for files. Default is auto-detect. Example: 'utf-8', 'latin1', 'ascii'",
                    },
                    "max_count": {
                        "type": "integer",
                        "description": "Maximum number of matches per file. Useful to prevent overwhelming output from files with many matches",
                    },
                    "timeout_ms": {
                        "type": "integer",
                        "description": "Search timeout in milliseconds. Prevents long-running searches. Example: 5000 for 5 second timeout",
                    },
                    "count_only_matches": {
                        "type": "boolean",
                        "default": False,
                        "description": "Return only match counts per file instead of full match details. Useful for statistics and performance",
                    },
                    "summary_only": {
                        "type": "boolean",
                        "default": False,
                        "description": "Return a condensed summary of results to reduce context size. Shows top files and sample matches",
                    },
                    "optimize_paths": {
                        "type": "boolean",
                        "default": False,
                        "description": "Optimize file paths in results by removing common prefixes and shortening long paths. Saves tokens in output",
                    },
                    "group_by_file": {
                        "type": "boolean",
                        "default": False,
                        "description": "Group results by file to eliminate file path duplication when multiple matches exist in the same file. Significantly reduces tokens",
                    },
                    "total_only": {
                        "type": "boolean",
                        "default": False,
                        "description": "Return only the total match count as a number. Most token-efficient option for count queries. Takes priority over all other formats",
                    },
                },
                "required": ["query"],
                "anyOf": [
                    {"required": ["roots"]},
                    {"required": ["files"]},
                ],
                "additionalProperties": False,
            },
        }

    def _validate_roots(self, roots: list[str]) -> list[str]:
        """Resolve root directories and enforce the project security boundary.

        Raises:
            ValueError: if any root fails directory validation.
        """
        validated: list[str] = []
        for r in roots:
            resolved = self.path_resolver.resolve(r)
            is_valid, error = self.security_validator.validate_directory_path(
                resolved, must_exist=True
            )
            if not is_valid:
                raise ValueError(f"Invalid root '{r}': {error}")
            validated.append(resolved)
        return validated

    def _validate_files(self, files: list[str]) -> list[str]:
        """Resolve explicit file paths and verify each exists and is a file.

        Raises:
            ValueError: on a mistyped entry, a boundary violation, or a
                missing file.
        """
        validated: list[str] = []
        for p in files:
            if not isinstance(p, str) or not p.strip():
                raise ValueError("files entries must be non-empty strings")
            resolved = self.path_resolver.resolve(p)
            ok, err = self.security_validator.validate_file_path(resolved)
            if not ok:
                raise ValueError(f"Invalid file path '{p}': {err}")
            if not Path(resolved).exists() or not Path(resolved).is_file():
                raise ValueError(f"File not found: {p}")
            validated.append(resolved)
        return validated

    def validate_arguments(self, arguments: dict[str, Any]) -> bool:
        """Type-check arguments against the declared input schema.

        Raises:
            ValueError: on a missing required argument or a type mismatch.
        """
        if (
            "query" not in arguments
            or not isinstance(arguments["query"], str)
            or not arguments["query"].strip()
        ):
            raise ValueError("query is required and must be a non-empty string")
        if "roots" not in arguments and "files" not in arguments:
            raise ValueError("Either roots or files must be provided")
        for key in [
            "case",
            "encoding",
            "max_filesize",
        ]:
            if key in arguments and not isinstance(arguments[key], str):
                raise ValueError(f"{key} must be a string")
        # FIX: "total_only", "optimize_paths", and "group_by_file" are declared
        # as booleans in the schema but were previously missing from this list,
        # so non-bool values slipped through unchecked.
        for key in [
            "fixed_strings",
            "word",
            "multiline",
            "follow_symlinks",
            "hidden",
            "no_ignore",
            "count_only_matches",
            "summary_only",
            "optimize_paths",
            "group_by_file",
            "total_only",
        ]:
            if key in arguments and not isinstance(arguments[key], bool):
                raise ValueError(f"{key} must be a boolean")
        for key in ["context_before", "context_after", "max_count", "timeout_ms"]:
            if key in arguments and not isinstance(arguments[key], int):
                raise ValueError(f"{key} must be an integer")
        for key in ["include_globs", "exclude_globs"]:
            if key in arguments:
                v = arguments[key]
                if not isinstance(v, list) or not all(isinstance(x, str) for x in v):
                    raise ValueError(f"{key} must be an array of strings")
        return True

    @handle_mcp_errors("search_content")
    async def execute(self, arguments: dict[str, Any]) -> dict[str, Any]:
        """Run ripgrep with the given arguments and return match results.

        Output shape depends on the requested mode, in priority order:
        ``total_only`` (bare int), count-only payload, grouped-by-file
        payload, summary payload, then the full match list.
        """
        self.validate_arguments(arguments)

        roots = arguments.get("roots")
        files = arguments.get("files")
        if roots:
            roots = self._validate_roots(roots)
        if files:
            files = self._validate_files(files)

        # Clamp counts to safety limits
        max_count = fd_rg_utils.clamp_int(
            arguments.get("max_count"),
            fd_rg_utils.DEFAULT_RESULTS_LIMIT,
            fd_rg_utils.MAX_RESULTS_HARD_CAP,
        )
        timeout_ms = arguments.get("timeout_ms")

        # Note: --files-from is not supported in this ripgrep version.
        # For files mode, we'll search in the parent directories of the files.
        # NOTE(review): this widens the search to every file in those parent
        # directories, not just the listed files — confirm this is acceptable.
        if files:
            # Extract unique parent directories from file paths
            parent_dirs = set()
            for file_path in files:
                resolved = self.path_resolver.resolve(file_path)
                parent_dirs.add(str(Path(resolved).parent))

            # Use parent directories as roots for compatibility
            roots = list(parent_dirs)

        # Check for count-only mode (total_only also requires count mode).
        # FIX: total_only was previously recomputed a second time after the
        # ripgrep run; compute it once here and reuse it below.
        total_only = bool(arguments.get("total_only", False))
        count_only_matches = (
            bool(arguments.get("count_only_matches", False)) or total_only
        )
        summary_only = bool(arguments.get("summary_only", False))

        # Roots mode
        cmd = fd_rg_utils.build_rg_command(
            query=arguments["query"],
            case=arguments.get("case", "smart"),
            fixed_strings=bool(arguments.get("fixed_strings", False)),
            word=bool(arguments.get("word", False)),
            multiline=bool(arguments.get("multiline", False)),
            include_globs=arguments.get("include_globs"),
            exclude_globs=arguments.get("exclude_globs"),
            follow_symlinks=bool(arguments.get("follow_symlinks", False)),
            hidden=bool(arguments.get("hidden", False)),
            no_ignore=bool(arguments.get("no_ignore", False)),
            max_filesize=arguments.get("max_filesize"),
            context_before=arguments.get("context_before"),
            context_after=arguments.get("context_after"),
            encoding=arguments.get("encoding"),
            max_count=max_count,
            timeout_ms=timeout_ms,
            roots=roots,
            files_from=None,
            count_only_matches=count_only_matches,
        )

        started = time.time()
        rc, out, err = await fd_rg_utils.run_command_capture(cmd, timeout_ms=timeout_ms)
        elapsed_ms = int((time.time() - started) * 1000)

        # ripgrep exits 0 on matches and 1 on "no matches"; both are success.
        if rc not in (0, 1):
            message = err.decode("utf-8", errors="replace").strip() or "ripgrep failed"
            return {"success": False, "error": message, "returncode": rc}

        # Handle total-only mode (highest priority for count queries)
        if total_only:
            # Parse count output and return only the total
            file_counts = fd_rg_utils.parse_rg_count_output(out)
            total_matches = file_counts.pop("__total__", 0)
            return total_matches

        # Handle count-only mode
        if count_only_matches:
            file_counts = fd_rg_utils.parse_rg_count_output(out)
            total_matches = file_counts.pop("__total__", 0)
            return {
                "success": True,
                "count_only": True,
                "total_matches": total_matches,
                "file_counts": file_counts,
                "elapsed_ms": elapsed_ms,
            }

        # Handle normal mode
        matches = fd_rg_utils.parse_rg_json_lines_to_matches(out)
        truncated = len(matches) >= fd_rg_utils.MAX_RESULTS_HARD_CAP
        if truncated:
            matches = matches[: fd_rg_utils.MAX_RESULTS_HARD_CAP]

        # Apply path optimization if requested
        optimize_paths = arguments.get("optimize_paths", False)
        if optimize_paths and matches:
            matches = fd_rg_utils.optimize_match_paths(matches)

        # Apply file grouping if requested (takes priority over other formats)
        group_by_file = arguments.get("group_by_file", False)
        if group_by_file and matches:
            return fd_rg_utils.group_matches_by_file(matches)

        # Handle summary mode
        if summary_only:
            summary = fd_rg_utils.summarize_search_results(matches)
            return {
                "success": True,
                "count": len(matches),
                "truncated": truncated,
                "elapsed_ms": elapsed_ms,
                "summary": summary,
            }

        return {
            "success": True,
            "count": len(matches),
            "truncated": truncated,
            "elapsed_ms": elapsed_ms,
            "results": matches,
        }