tree-sitter-analyzer 1.6.1.2__py3-none-any.whl → 1.6.1.4__py3-none-any.whl

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.

Potentially problematic release: this version of tree-sitter-analyzer might be problematic.

@@ -0,0 +1,361 @@
+ #!/usr/bin/env python3
+ """
+ Unified log management system.
+
+ Provides the LoggerManager class, which resolves the duplicate log
+ output problem. Logger instances are managed uniquely via the
+ singleton pattern, preventing duplicate handlers.
+ """
+
+ import logging
+ import os
+ import sys
+ import tempfile
+ import threading
+ from pathlib import Path
+ from typing import Any, Dict, List, Optional
+
+
+ class LoggerManager:
+     """
+     Unified logger management class.
+
+     Manages logger instances via the singleton pattern and
+     prevents duplicate handlers.
+     """
+
+     _instance: Optional['LoggerManager'] = None
+     _lock: threading.Lock = threading.Lock()
+     _loggers: Dict[str, logging.Logger] = {}
+     _handler_registry: Dict[str, List[str]] = {}
+     _initialized: bool = False
+     _file_log_message_shown: bool = False
+
+     def __new__(cls) -> 'LoggerManager':
+         """Thread-safe singleton implementation."""
+         if cls._instance is None:
+             with cls._lock:
+                 if cls._instance is None:
+                     cls._instance = super().__new__(cls)
+         return cls._instance
+
+     def __init__(self) -> None:
+         """Initialization (runs only once because of the singleton)."""
+         if not self._initialized:
+             with self._lock:
+                 if not self._initialized:
+                     self._loggers = {}
+                     self._handler_registry = {}
+                     self._initialized = True
+
+     def get_logger(
+         self,
+         name: str = "tree_sitter_analyzer",
+         level: int | str = logging.WARNING
+     ) -> logging.Logger:
+         """
+         Get a logger while preventing duplicates.
+
+         Args:
+             name: Logger name
+             level: Log level
+
+         Returns:
+             Configured logger instance
+         """
+         with self._lock:
+             if name not in self._loggers:
+                 self._loggers[name] = self._create_logger(name, level)
+             else:
+                 # Update the level even for an existing logger
+                 numeric_level = self._convert_level(level)
+
+                 # A level set via the environment variable takes precedence
+                 env_level = os.environ.get("LOG_LEVEL", "").upper()
+                 if env_level and env_level in ["DEBUG", "INFO", "WARNING", "ERROR"]:
+                     numeric_level = getattr(logging, env_level)
+
+                 self._loggers[name].setLevel(numeric_level)
+
+             return self._loggers[name]
+
+     def _create_logger(self, name: str, level: int | str) -> logging.Logger:
+         """
+         Create a logger and configure its handlers.
+
+         Args:
+             name: Logger name
+             level: Log level
+
+         Returns:
+             Configured logger instance
+         """
+         # Convert the level to a numeric value
+         numeric_level = self._convert_level(level)
+
+         # Level override from the environment variable
+         env_level = os.environ.get("LOG_LEVEL", "").upper()
+         if env_level and env_level in ["DEBUG", "INFO", "WARNING", "ERROR"]:
+             numeric_level = getattr(logging, env_level)
+
+         logger = logging.getLogger(name)
+
+         # Check for duplicate handlers
+         if not self._has_required_handlers(logger, name):
+             self._setup_handlers(logger, name, numeric_level)
+
+         # Set the logger level
+         logger.setLevel(numeric_level)
+
+         return logger
+
+     def _convert_level(self, level: int | str) -> int:
+         """Convert a log level string to its numeric value."""
+         if isinstance(level, str):
+             level_upper = level.upper()
+             level_map = {
+                 "DEBUG": logging.DEBUG,
+                 "INFO": logging.INFO,
+                 "WARNING": logging.WARNING,
+                 "ERROR": logging.ERROR,
+             }
+             return level_map.get(level_upper, logging.WARNING)
+         return level
+
+     def _has_required_handlers(self, logger: logging.Logger, name: str) -> bool:
+         """
+         Check whether the required handlers are already configured.
+
+         Args:
+             logger: Logger to check
+             name: Logger name
+
+         Returns:
+             True if the required handlers are already configured
+         """
+         if name in self._handler_registry:
+             # A logger that is already managed is treated as configured
+             return True
+
+         # Check for existing handlers
+         has_stream_handler = any(
+             isinstance(h, logging.StreamHandler) and not isinstance(h, logging.FileHandler)
+             for h in logger.handlers
+         )
+
+         if has_stream_handler:
+             # Record the handler registration
+             handler_types = [type(h).__name__ for h in logger.handlers]
+             self._handler_registry[name] = handler_types
+             return True
+
+         return False
+
+     def _setup_handlers(self, logger: logging.Logger, name: str, level: int) -> None:
+         """
+         Attach handlers to a logger.
+
+         Args:
+             logger: Logger to configure
+             name: Logger name
+             level: Log level
+         """
+         # Add the main (stderr) handler
+         if not self._has_stream_handler(logger):
+             stream_handler = SafeStreamHandler()
+             formatter = logging.Formatter(
+                 "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+             )
+             stream_handler.setFormatter(formatter)
+             logger.addHandler(stream_handler)
+
+         # Add the file log handler (enabled via environment variable)
+         enable_file_log = (
+             os.environ.get("TREE_SITTER_ANALYZER_ENABLE_FILE_LOG", "").lower() == "true"
+         )
+
+         if enable_file_log and not self._has_file_handler(logger):
+             file_handler = self._create_file_handler(level)
+             if file_handler:
+                 logger.addHandler(file_handler)
+
+         # Record the handler registration
+         handler_types = [type(h).__name__ for h in logger.handlers]
+         self._handler_registry[name] = handler_types
+
+     def _has_stream_handler(self, logger: logging.Logger) -> bool:
+         """Check for an existing StreamHandler."""
+         return any(
+             isinstance(h, logging.StreamHandler) and not isinstance(h, logging.FileHandler)
+             for h in logger.handlers
+         )
+
+     def _has_file_handler(self, logger: logging.Logger) -> bool:
+         """Check for an existing FileHandler."""
+         return any(isinstance(h, logging.FileHandler) for h in logger.handlers)
+
+     def _create_file_handler(self, level: int) -> Optional[logging.FileHandler]:
+         """
+         Create the file handler.
+
+         Args:
+             level: Log level
+
+         Returns:
+             The created FileHandler, or None
+         """
+         try:
+             # Determine the log directory
+             log_dir = os.environ.get("TREE_SITTER_ANALYZER_LOG_DIR")
+             if log_dir:
+                 log_path = Path(log_dir) / "tree_sitter_analyzer.log"
+                 Path(log_dir).mkdir(parents=True, exist_ok=True)
+             else:
+                 temp_dir = tempfile.gettempdir()
+                 log_path = Path(temp_dir) / "tree_sitter_analyzer.log"
+
+             # Determine the file log level
+             file_log_level_str = os.environ.get(
+                 "TREE_SITTER_ANALYZER_FILE_LOG_LEVEL", ""
+             ).upper()
+             file_log_level = level  # defaults to the main level
+
+             if file_log_level_str in ["DEBUG", "INFO", "WARNING", "ERROR"]:
+                 file_log_level = getattr(logging, file_log_level_str)
+
+             # Create the file handler
+             file_handler = logging.FileHandler(str(log_path), encoding="utf-8")
+             formatter = logging.Formatter(
+                 "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+             )
+             file_handler.setFormatter(formatter)
+             file_handler.setLevel(file_log_level)
+
+             # Report the log file path (once only)
+             if not LoggerManager._file_log_message_shown:
+                 LoggerManager._file_log_message_shown = True
+                 if hasattr(sys, "stderr") and hasattr(sys.stderr, "write"):
+                     try:
+                         sys.stderr.write(
+                             f"[LoggerManager] File logging enabled: {log_path}\n"
+                         )
+                     except Exception:
+                         pass
+
+             return file_handler
+
+         except Exception as e:
+             # Main operation continues even if file handler creation fails
+             if hasattr(sys, "stderr") and hasattr(sys.stderr, "write"):
+                 try:
+                     sys.stderr.write(
+                         f"[LoggerManager] File handler creation failed: {e}\n"
+                     )
+                 except Exception:
+                     pass
+             return None
+
+     def reset_for_testing(self) -> None:
+         """
+         Reset hook for tests.
+
+         Note:
+             Must not be used in production.
+         """
+         with self._lock:
+             # Clean up all handlers
+             for logger in self._loggers.values():
+                 for handler in logger.handlers[:]:
+                     try:
+                         handler.close()
+                         logger.removeHandler(handler)
+                     except Exception:
+                         pass
+
+             self._loggers.clear()
+             self._handler_registry.clear()
+             LoggerManager._file_log_message_shown = False
+
+
+ class SafeStreamHandler(logging.StreamHandler):
+     """
+     Safe StreamHandler implementation.
+
+     Handles stream-close problems in MCP stdio communication
+     and in test environments.
+     """
+
+     def __init__(self, stream=None):
+         # Use stderr by default (stdout is reserved for MCP)
+         super().__init__(stream if stream is not None else sys.stderr)
+
+     def emit(self, record: Any) -> None:
+         """
+         Safely emit a record.
+
+         Args:
+             record: Log record
+         """
+         try:
+             # Check the stream state
+             if hasattr(self.stream, "closed") and self.stream.closed:
+                 return
+
+             if not hasattr(self.stream, "write"):
+                 return
+
+             # Special handling for the pytest environment
+             stream_name = getattr(self.stream, "name", "")
+             if stream_name is None or "pytest" in str(type(self.stream)).lower():
+                 try:
+                     super().emit(record)
+                     return
+                 except (ValueError, OSError, AttributeError, UnicodeError):
+                     return
+
+             # Normal writability check for the stream
+             try:
+                 if hasattr(self.stream, "writable") and not self.stream.writable():
+                     return
+             except (ValueError, OSError, AttributeError, UnicodeError):
+                 return
+
+             super().emit(record)
+
+         except (ValueError, OSError, AttributeError, UnicodeError):
+             # Silently ignore I/O errors (during shutdown or pytest capture)
+             pass
+         except Exception:
+             # Silently ignore other unexpected errors as well
+             pass
+
+
+ # Global instance
+ _logger_manager = LoggerManager()
+
+
+ def get_logger_manager() -> LoggerManager:
+     """
+     Get the global LoggerManager instance.
+
+     Returns:
+         The LoggerManager instance
+     """
+     return _logger_manager
+
+
+ def get_unified_logger(
+     name: str = "tree_sitter_analyzer",
+     level: int | str = logging.WARNING
+ ) -> logging.Logger:
+     """
+     Unified logger retrieval function.
+
+     Args:
+         name: Logger name
+         level: Log level
+
+     Returns:
+         Configured logger instance
+     """
+     return _logger_manager.get_logger(name, level)
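Illustrative note (not part of the diff): a minimal usage sketch of the module above. The environment variable names and function behavior come from the code itself; the import path is hypothetical, since the diff does not show where the module lives.

import logging
import os

# Env vars read by LoggerManager above; set before the first logger is requested.
os.environ["TREE_SITTER_ANALYZER_ENABLE_FILE_LOG"] = "true"
os.environ["LOG_LEVEL"] = "DEBUG"

# Hypothetical import path.
from tree_sitter_analyzer.logger_manager import get_unified_logger

log_a = get_unified_logger("tree_sitter_analyzer", "INFO")
log_b = get_unified_logger("tree_sitter_analyzer", logging.WARNING)

assert log_a is log_b                # one shared logger, managed by the singleton
assert len(log_a.handlers) <= 2      # one SafeStreamHandler, plus a FileHandler when file logging succeeds
assert log_a.level == logging.DEBUG  # LOG_LEVEL overrides both requested levels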
@@ -67,7 +67,7 @@ from .tools.read_partial_tool import ReadPartialTool
  from .tools.search_content_tool import SearchContentTool
  from .tools.table_format_tool import TableFormatTool

- # Set up logging
+ # Set up logging using unified LoggerManager
  logger = setup_logger(__name__)


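Illustrative note (not part of the diff): the updated comment suggests setup_logger now routes through the unified manager. A plausible sketch of that delegation, assuming setup_logger sits near get_unified_logger; the diff itself shows only the comment change.

import logging

from .logger_manager import get_unified_logger  # hypothetical location

def setup_logger(name: str, level: int | str = logging.WARNING) -> logging.Logger:
    # Delegate to the unified manager so every module shares the
    # duplicate-handler protection shown in the first hunk.
    return get_unified_logger(name, level)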
@@ -0,0 +1,147 @@
+ #!/usr/bin/env python3
+ """
+ Output format parameter validation for search_content tool.
+
+ Ensures mutual exclusion of output format parameters to prevent conflicts.
+ """
+
+ import locale
+ import os
+ from typing import Any
+
+
+ class OutputFormatValidator:
+     """Validator for output format parameters mutual exclusion."""
+
+     # Output format parameters that are mutually exclusive
+     OUTPUT_FORMAT_PARAMS = {
+         "total_only",
+         "count_only_matches",
+         "summary_only",
+         "group_by_file",
+         "optimize_paths"
+     }
+
+     # Token efficiency guidance for error messages
+     FORMAT_EFFICIENCY_GUIDE = {
+         "total_only": "~10 tokens (most efficient for count queries)",
+         "count_only_matches": "~50-200 tokens (file distribution analysis)",
+         "summary_only": "~500-2000 tokens (initial investigation)",
+         "group_by_file": "~2000-10000 tokens (context-aware review)",
+         "optimize_paths": "10-30% reduction (path compression)"
+     }
+
+     def _detect_language(self) -> str:
+         """Detect preferred language from environment."""
+         # Check environment variables for language preference
+         lang = os.environ.get('LANG', '')
+         if lang.startswith('ja'):
+             return 'ja'
+
+         # Check locale
+         try:
+             current_locale = locale.getlocale()[0]
+             if current_locale and current_locale.startswith('ja'):
+                 return 'ja'
+         except Exception:
+             pass
+
+         # Default to English
+         return 'en'
+
+     def _get_error_message(self, specified_formats: list[str]) -> str:
+         """Generate localized error message with usage examples."""
+         lang = self._detect_language()
+         format_list = ", ".join(specified_formats)
+
+         if lang == 'ja':
+             # Japanese error message
+             base_message = (
+                 f"⚠️ 出力形式パラメータエラー: 複数指定できません: {format_list}\n\n"
+                 f"📋 排他的パラメータ: {', '.join(self.OUTPUT_FORMAT_PARAMS)}\n\n"
+                 f"💡 効率性ガイド:\n"
+             )
+
+             for param, desc in self.FORMAT_EFFICIENCY_GUIDE.items():
+                 base_message += f"  • {param}: {desc}\n"
+
+             base_message += (
+                 "\n✅ 推奨パターン:\n"
+                 "  • 件数確認: total_only=true\n"
+                 "  • ファイル分布: count_only_matches=true\n"
+                 "  • 初期調査: summary_only=true\n"
+                 "  • 詳細レビュー: group_by_file=true\n"
+                 "  • パス最適化: optimize_paths=true\n\n"
+                 "❌ 間違った例: {\"total_only\": true, \"summary_only\": true}\n"
+                 "✅ 正しい例: {\"total_only\": true}"
+             )
+         else:
+             # English error message
+             base_message = (
+                 f"⚠️ Output Format Parameter Error: Multiple formats specified: {format_list}\n\n"
+                 f"📋 Mutually Exclusive Parameters: {', '.join(self.OUTPUT_FORMAT_PARAMS)}\n\n"
+                 f"💡 Token Efficiency Guide:\n"
+             )
+
+             for param, desc in self.FORMAT_EFFICIENCY_GUIDE.items():
+                 base_message += f"  • {param}: {desc}\n"
+
+             base_message += (
+                 "\n✅ Recommended Usage Patterns:\n"
+                 "  • Count validation: total_only=true\n"
+                 "  • File distribution: count_only_matches=true\n"
+                 "  • Initial investigation: summary_only=true\n"
+                 "  • Detailed review: group_by_file=true\n"
+                 "  • Path optimization: optimize_paths=true\n\n"
+                 "❌ Incorrect: {\"total_only\": true, \"summary_only\": true}\n"
+                 "✅ Correct: {\"total_only\": true}"
+             )
+
+         return base_message
+
+     def validate_output_format_exclusion(self, arguments: dict[str, Any]) -> None:
+         """
+         Validate that only one output format parameter is specified.
+
+         Args:
+             arguments: Tool arguments dictionary
+
+         Raises:
+             ValueError: If multiple output format parameters are specified
+         """
+         specified_formats = []
+
+         for param in self.OUTPUT_FORMAT_PARAMS:
+             if arguments.get(param, False):
+                 specified_formats.append(param)
+
+         if len(specified_formats) > 1:
+             error_message = self._get_error_message(specified_formats)
+             raise ValueError(error_message)
+
+     def get_active_format(self, arguments: dict[str, Any]) -> str:
+         """
+         Get the active output format from arguments.
+
+         Args:
+             arguments: Tool arguments dictionary
+
+         Returns:
+             Active format name or "normal" if none specified
+         """
+         for param in self.OUTPUT_FORMAT_PARAMS:
+             if arguments.get(param, False):
+                 return param
+         return "normal"
+
+
+ # Global validator instance
+ _default_validator = None
+
+
+ def get_default_validator() -> OutputFormatValidator:
+     """Get the default output format validator instance."""
+     global _default_validator
+     if _default_validator is None:
+         _default_validator = OutputFormatValidator()
+     return _default_validator
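Illustrative note (not part of the diff): how the validator above behaves, using only the functions defined in this file.

validator = get_default_validator()

# A single format flag passes validation, and the active format is reported.
ok_args = {"query": "TODO", "total_only": True}
validator.validate_output_format_exclusion(ok_args)  # no exception
assert validator.get_active_format(ok_args) == "total_only"
assert validator.get_active_format({"query": "TODO"}) == "normal"

# Two format flags conflict: the ValueError carries the localized efficiency guide.
bad_args = {"query": "TODO", "total_only": True, "summary_only": True}
try:
    validator.validate_output_format_exclusion(bad_args)
except ValueError as exc:
    print(exc)  # lists the conflicting flags plus recommended single-format patterns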
@@ -17,6 +17,7 @@ from ..utils.gitignore_detector import get_default_detector
  from ..utils.search_cache import get_default_cache
  from . import fd_rg_utils
  from .base_tool import BaseMCPTool
+ from .output_format_validator import get_default_validator

  logger = logging.getLogger(__name__)

@@ -40,7 +41,26 @@ class SearchContentTool(BaseMCPTool):
      def get_tool_definition(self) -> dict[str, Any]:
          return {
              "name": "search_content",
-             "description": "Search text content inside files using ripgrep. Supports regex patterns, case sensitivity, context lines, and various output formats. Can search in directories or specific files.",
+             "description": """Search text content inside files using ripgrep. Supports regex patterns, case sensitivity, context lines, and various output formats. Can search in directories or specific files.
+
+ ⚠️ IMPORTANT: Token Efficiency Guide
+ Choose output format parameters based on your needs to minimize token usage:
+
+ 🎯 RECOMMENDED WORKFLOW (Most Efficient Approach):
+ 1. START with total_only=true for initial count validation (~10 tokens)
+ 2. IF more detail needed, use count_only_matches=true for file distribution (~50-200 tokens)
+ 3. IF context needed, use summary_only=true for an overview (~500-2000 tokens)
+ 4. ONLY use full results when specific content review is required (~2000-50000+ tokens)
+
+ 💡 TOKEN EFFICIENCY COMPARISON:
+ - total_only: ~10 tokens (single number) - MOST EFFICIENT for count queries
+ - count_only_matches: ~50-200 tokens (file counts) - Good for file distribution analysis
+ - summary_only: ~500-2000 tokens (condensed overview) - Good for initial investigation
+ - group_by_file: ~2000-10000 tokens (organized by file) - Context-aware review
+ - optimize_paths: 10-30% reduction (path compression) - Use with deep directory structures
+ - Full results: ~2000-50000+ tokens - Use sparingly for detailed analysis
+
+ ⚠️ MUTUALLY EXCLUSIVE: Only one output format parameter can be true at a time.""",
              "inputSchema": {
                  "type": "object",
                  "properties": {
@@ -131,27 +151,27 @@ class SearchContentTool(BaseMCPTool):
                      "count_only_matches": {
                          "type": "boolean",
                          "default": False,
-                         "description": "Return only match counts per file instead of full match details. Useful for statistics and performance",
+                         "description": "⚠️ EXCLUSIVE: Return only match counts per file (~50-200 tokens). RECOMMENDED for: File distribution analysis, understanding match spread across files. Cannot be combined with other output formats.",
                      },
                      "summary_only": {
                          "type": "boolean",
                          "default": False,
-                         "description": "Return a condensed summary of results to reduce context size. Shows top files and sample matches",
+                         "description": "⚠️ EXCLUSIVE: Return condensed overview with top files and sample matches (~500-2000 tokens). RECOMMENDED for: Initial investigation, scope confirmation, pattern validation. Cannot be combined with other output formats.",
                      },
                      "optimize_paths": {
                          "type": "boolean",
                          "default": False,
-                         "description": "Optimize file paths in results by removing common prefixes and shortening long paths. Saves tokens in output",
+                         "description": "⚠️ EXCLUSIVE: Optimize file paths by removing common prefixes (10-30% token reduction). RECOMMENDED for: Deep directory structures, large codebases. Cannot be combined with other output formats.",
                      },
                      "group_by_file": {
                          "type": "boolean",
                          "default": False,
-                         "description": "Group results by file to eliminate file path duplication when multiple matches exist in the same file. Significantly reduces tokens",
+                         "description": "⚠️ EXCLUSIVE: Group results by file, eliminating path duplication (~2000-10000 tokens). RECOMMENDED for: Context-aware review, analyzing matches within specific files. Cannot be combined with other output formats.",
                      },
                      "total_only": {
                          "type": "boolean",
                          "default": False,
-                         "description": "Return only the total match count as a number. Most token-efficient option for count queries. Takes priority over all other formats",
+                         "description": "⚠️ EXCLUSIVE: Return only the total match count as a single number (~10 tokens - MOST EFFICIENT). RECOMMENDED for: Count validation, filtering decisions, existence checks. Takes priority over all other formats. Cannot be combined with other output formats.",
                      },
                  },
                  "required": ["query"],
@@ -214,6 +234,9 @@ class SearchContentTool(BaseMCPTool):
              "no_ignore",
              "count_only_matches",
              "summary_only",
+             "total_only",
+             "group_by_file",
+             "optimize_paths",
          ]:
              if key in arguments and not isinstance(arguments[key], bool):
                  raise ValueError(f"{key} must be a boolean")
@@ -226,6 +249,10 @@ class SearchContentTool(BaseMCPTool):
              if not isinstance(v, list) or not all(isinstance(x, str) for x in v):
                  raise ValueError(f"{key} must be an array of strings")

+         # Validate output format parameter exclusion
+         validator = get_default_validator()
+         validator.validate_output_format_exclusion(arguments)
+
          # Validate roots and files if provided
          if "roots" in arguments:
              self._validate_roots(arguments["roots"])
@@ -310,13 +337,19 @@ class SearchContentTool(BaseMCPTool):
          if isinstance(cached_result, dict):
              cached_result = cached_result.copy()
              cached_result["cache_hit"] = True
-             return cached_result
+             return cached_result
+         elif isinstance(cached_result, int):
+             # Handle int results (for total_only)
+             return cached_result
+         else:
+             # Convert other types to dict format for type safety
+             return {"success": True, "cache_hit": True, "value": cached_result}

          # Clamp counts to safety limits
          max_count = fd_rg_utils.clamp_int(
              arguments.get("max_count"),
              fd_rg_utils.DEFAULT_RESULTS_LIMIT,
-             fd_rg_utils.DEFAULT_RESULTS_LIMIT,
+             fd_rg_utils.MAX_RESULTS_HARD_CAP,
          )
          timeout_ms = arguments.get("timeout_ms")
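Illustrative note (not part of the diff): the new cache branch exists because total_only results are cached as a bare int, which has no .copy(); the clamp fix matters because the old call passed DEFAULT_RESULTS_LIMIT as both the fallback and the ceiling, silently truncating any larger user-supplied max_count. A sketch of the assumed clamp_int semantics follows; only the names appear in the diff, so the body is a guess.

def clamp_int(value, default: int, maximum: int) -> int:
    # Assumed behavior: fall back to the default when the caller supplied
    # nothing usable, otherwise cap the request at the hard limit.
    if not isinstance(value, int) or value <= 0:
        return default
    return min(value, maximum)

# Before: clamp_int(max_count, DEFAULT_RESULTS_LIMIT, DEFAULT_RESULTS_LIMIT)
#   -> requests above the default were silently reduced to it.
# After:  clamp_int(max_count, DEFAULT_RESULTS_LIMIT, MAX_RESULTS_HARD_CAP)
#   -> callers can raise max_count up to the hard cap.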