tree-sitter-analyzer 1.6.1.2__py3-none-any.whl → 1.6.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

@@ -0,0 +1,147 @@
+ #!/usr/bin/env python3
+ """
+ Output format parameter validation for the search_content tool.
+
+ Ensures mutual exclusion of output format parameters to prevent conflicts.
+ """
+
+ import locale
+ import os
+ from typing import Any
+
+
+ class OutputFormatValidator:
+     """Validator enforcing mutual exclusion of output format parameters."""
+
+     # Output format parameters that are mutually exclusive
+     OUTPUT_FORMAT_PARAMS = {
+         "total_only",
+         "count_only_matches",
+         "summary_only",
+         "group_by_file",
+         "optimize_paths"
+     }
+
+     # Token efficiency guidance for error messages
+     FORMAT_EFFICIENCY_GUIDE = {
+         "total_only": "~10 tokens (most efficient for count queries)",
+         "count_only_matches": "~50-200 tokens (file distribution analysis)",
+         "summary_only": "~500-2000 tokens (initial investigation)",
+         "group_by_file": "~2000-10000 tokens (context-aware review)",
+         "optimize_paths": "10-30% reduction (path compression)"
+     }
+
+     def _detect_language(self) -> str:
+         """Detect the preferred language from the environment."""
+         # Check environment variables for language preference
+         lang = os.environ.get('LANG', '')
+         if lang.startswith('ja'):
+             return 'ja'
+
+         # Check locale
+         try:
+             current_locale = locale.getlocale()[0]
+             if current_locale and current_locale.startswith('ja'):
+                 return 'ja'
+         except Exception:
+             pass
+
+         # Default to English
+         return 'en'
+
+     def _get_error_message(self, specified_formats: list[str]) -> str:
+         """Generate a localized error message with usage examples."""
+         lang = self._detect_language()
+         format_list = ", ".join(specified_formats)
+
+         if lang == 'ja':
+             # Japanese error message
+             base_message = (
+                 f"⚠️ 出力形式パラメータエラー: 複数指定できません: {format_list}\n\n"
+                 f"📋 排他的パラメータ: {', '.join(self.OUTPUT_FORMAT_PARAMS)}\n\n"
+                 f"💡 効率性ガイド:\n"
+             )
+
+             for param, desc in self.FORMAT_EFFICIENCY_GUIDE.items():
+                 base_message += f"  • {param}: {desc}\n"
+
+             base_message += (
+                 "\n✅ 推奨パターン:\n"
+                 "  • 件数確認: total_only=true\n"
+                 "  • ファイル分布: count_only_matches=true\n"
+                 "  • 初期調査: summary_only=true\n"
+                 "  • 詳細レビュー: group_by_file=true\n"
+                 "  • パス最適化: optimize_paths=true\n\n"
+                 "❌ 間違った例: {\"total_only\": true, \"summary_only\": true}\n"
+                 "✅ 正しい例: {\"total_only\": true}"
+             )
+         else:
+             # English error message
+             base_message = (
+                 f"⚠️ Output Format Parameter Error: Multiple formats specified: {format_list}\n\n"
+                 f"📋 Mutually Exclusive Parameters: {', '.join(self.OUTPUT_FORMAT_PARAMS)}\n\n"
+                 f"💡 Token Efficiency Guide:\n"
+             )
+
+             for param, desc in self.FORMAT_EFFICIENCY_GUIDE.items():
+                 base_message += f"  • {param}: {desc}\n"
+
+             base_message += (
+                 "\n✅ Recommended Usage Patterns:\n"
+                 "  • Count validation: total_only=true\n"
+                 "  • File distribution: count_only_matches=true\n"
+                 "  • Initial investigation: summary_only=true\n"
+                 "  • Detailed review: group_by_file=true\n"
+                 "  • Path optimization: optimize_paths=true\n\n"
+                 "❌ Incorrect: {\"total_only\": true, \"summary_only\": true}\n"
+                 "✅ Correct: {\"total_only\": true}"
+             )
+
+         return base_message
+
+     def validate_output_format_exclusion(self, arguments: dict[str, Any]) -> None:
+         """
+         Validate that at most one output format parameter is specified.
+
+         Args:
+             arguments: Tool arguments dictionary
+
+         Raises:
+             ValueError: If multiple output format parameters are specified
+         """
+         specified_formats = []
+
+         for param in self.OUTPUT_FORMAT_PARAMS:
+             if arguments.get(param, False):
+                 specified_formats.append(param)
+
+         if len(specified_formats) > 1:
+             error_message = self._get_error_message(specified_formats)
+             raise ValueError(error_message)
+
+     def get_active_format(self, arguments: dict[str, Any]) -> str:
+         """
+         Get the active output format from arguments.
+
+         Args:
+             arguments: Tool arguments dictionary
+
+         Returns:
+             Active format name, or "normal" if none is specified
+         """
+         for param in self.OUTPUT_FORMAT_PARAMS:
+             if arguments.get(param, False):
+                 return param
+         return "normal"
+
+
+ # Global validator instance
+ _default_validator = None
+
+
+ def get_default_validator() -> OutputFormatValidator:
+     """Get the default output format validator instance."""
+     global _default_validator
+     if _default_validator is None:
+         _default_validator = OutputFormatValidator()
+     return _default_validator
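The new module's public surface is small: one guard that raises and one accessor. A quick usage sketch of the API above; the absolute import path is inferred from the relative import (`from .output_format_validator import get_default_validator`) added in the next hunk, so treat it as an assumption:

```python
# Assumed import path; the diff only shows relative imports within the package.
from tree_sitter_analyzer.mcp.tools.output_format_validator import (
    get_default_validator,
)

validator = get_default_validator()

# Zero or one format flag passes silently.
validator.validate_output_format_exclusion({"query": "TODO", "total_only": True})
print(validator.get_active_format({"total_only": True}))  # -> "total_only"
print(validator.get_active_format({"query": "TODO"}))     # -> "normal"

# Two format flags raise ValueError carrying the localized guidance message.
try:
    validator.validate_output_format_exclusion(
        {"total_only": True, "summary_only": True}
    )
except ValueError as e:
    print(str(e).splitlines()[0])  # first line of the English or Japanese message
```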
@@ -17,6 +17,7 @@ from ..utils.gitignore_detector import get_default_detector
  from ..utils.search_cache import get_default_cache
  from . import fd_rg_utils
  from .base_tool import BaseMCPTool
+ from .output_format_validator import get_default_validator

  logger = logging.getLogger(__name__)

@@ -40,7 +41,26 @@ class SearchContentTool(BaseMCPTool):
      def get_tool_definition(self) -> dict[str, Any]:
          return {
              "name": "search_content",
-             "description": "Search text content inside files using ripgrep. Supports regex patterns, case sensitivity, context lines, and various output formats. Can search in directories or specific files.",
+             "description": """Search text content inside files using ripgrep. Supports regex patterns, case sensitivity, context lines, and various output formats. Can search in directories or specific files.
+
+ ⚠️ IMPORTANT: Token Efficiency Guide
+ Choose an output format parameter based on your needs to minimize token usage and maximize performance:
+
+ 🎯 RECOMMENDED WORKFLOW (Most Efficient Approach):
+ 1. START with total_only=true for initial count validation (~10 tokens)
+ 2. IF more detail is needed, use count_only_matches=true for file distribution (~50-200 tokens)
+ 3. IF context is needed, use summary_only=true for an overview (~500-2000 tokens)
+ 4. ONLY use full results when specific content review is required (~2000-50000+ tokens)
+
+ 💡 TOKEN EFFICIENCY COMPARISON:
+ - total_only: ~10 tokens (single number) - MOST EFFICIENT for count queries
+ - count_only_matches: ~50-200 tokens (file counts) - Good for file distribution analysis
+ - summary_only: ~500-2000 tokens (condensed overview) - Good for initial investigation
+ - group_by_file: ~2000-10000 tokens (organized by file) - Context-aware review
+ - optimize_paths: 10-30% reduction (path compression) - Use with deep directory structures
+ - Full results: ~2000-50000+ tokens - Use sparingly, for detailed analysis only
+
+ ⚠️ MUTUALLY EXCLUSIVE: Only one output format parameter can be true at a time; format parameters cannot be combined.""",
              "inputSchema": {
                  "type": "object",
                  "properties": {
@@ -131,27 +151,27 @@ class SearchContentTool(BaseMCPTool):
                  "count_only_matches": {
                      "type": "boolean",
                      "default": False,
-                     "description": "Return only match counts per file instead of full match details. Useful for statistics and performance",
+                     "description": "⚠️ EXCLUSIVE: Return only match counts per file (~50-200 tokens). RECOMMENDED for: File distribution analysis, understanding match spread across files. Cannot be combined with other output formats.",
                  },
                  "summary_only": {
                      "type": "boolean",
                      "default": False,
-                     "description": "Return a condensed summary of results to reduce context size. Shows top files and sample matches",
+                     "description": "⚠️ EXCLUSIVE: Return condensed overview with top files and sample matches (~500-2000 tokens). RECOMMENDED for: Initial investigation, scope confirmation, pattern validation. Cannot be combined with other output formats.",
                  },
                  "optimize_paths": {
                      "type": "boolean",
                      "default": False,
-                     "description": "Optimize file paths in results by removing common prefixes and shortening long paths. Saves tokens in output",
+                     "description": "⚠️ EXCLUSIVE: Optimize file paths by removing common prefixes (10-30% token reduction). RECOMMENDED for: Deep directory structures, large codebases. Cannot be combined with other output formats.",
                  },
                  "group_by_file": {
                      "type": "boolean",
                      "default": False,
-                     "description": "Group results by file to eliminate file path duplication when multiple matches exist in the same file. Significantly reduces tokens",
+                     "description": "⚠️ EXCLUSIVE: Group results by file, eliminating path duplication (~2000-10000 tokens). RECOMMENDED for: Context-aware review, analyzing matches within specific files. Cannot be combined with other output formats.",
                  },
                  "total_only": {
                      "type": "boolean",
                      "default": False,
-                     "description": "Return only the total match count as a number. Most token-efficient option for count queries. Takes priority over all other formats",
+                     "description": "⚠️ EXCLUSIVE: Return only total match count as single number (~10 tokens - MOST EFFICIENT). RECOMMENDED for: Count validation, filtering decisions, existence checks. Takes priority over all other formats. Cannot be combined with other output formats.",
                  },
              },
              "required": ["query"],
@@ -214,6 +234,9 @@ class SearchContentTool(BaseMCPTool):
              "no_ignore",
              "count_only_matches",
              "summary_only",
+             "total_only",
+             "group_by_file",
+             "optimize_paths",
          ]:
              if key in arguments and not isinstance(arguments[key], bool):
                  raise ValueError(f"{key} must be a boolean")
@@ -226,6 +249,10 @@ class SearchContentTool(BaseMCPTool):
              if not isinstance(v, list) or not all(isinstance(x, str) for x in v):
                  raise ValueError(f"{key} must be an array of strings")

+         # Validate output format parameter exclusion
+         validator = get_default_validator()
+         validator.validate_output_format_exclusion(arguments)
+
          # Validate roots and files if provided
          if "roots" in arguments:
              self._validate_roots(arguments["roots"])
@@ -310,13 +337,19 @@ class SearchContentTool(BaseMCPTool):
          if isinstance(cached_result, dict):
              cached_result = cached_result.copy()
              cached_result["cache_hit"] = True
-             return cached_result
+             return cached_result
+         elif isinstance(cached_result, int):
+             # Handle int results (for total_only)
+             return cached_result
+         else:
+             # Convert other types to dict format for type safety
+             return {"success": True, "cache_hit": True, "value": cached_result}

          # Clamp counts to safety limits
          max_count = fd_rg_utils.clamp_int(
              arguments.get("max_count"),
              fd_rg_utils.DEFAULT_RESULTS_LIMIT,
-             fd_rg_utils.DEFAULT_RESULTS_LIMIT,
+             fd_rg_utils.MAX_RESULTS_HARD_CAP,
          )
          timeout_ms = arguments.get("timeout_ms")

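Note the type asymmetry this hunk codifies: total_only results are cached and returned as a bare int, everything else as a dict. Callers therefore have to branch on the result type; a minimal sketch, assuming the tool exposes an async execute(arguments) entry point (that name is not shown in this diff):

```python
async def count_matches(tool) -> None:
    # `tool.execute` is an assumed entry point; only the return shapes
    # (bare int for total_only, dict otherwise) come from the hunk above.
    result = await tool.execute({"query": "TODO", "total_only": True})
    if isinstance(result, int):
        print(f"{result} total matches")       # total_only path
    else:
        print(result.get("cache_hit", False))  # dict-shaped result
```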
@@ -8,6 +8,9 @@ appropriate extensions based on content type, with security validation.
  import json
  import os
+ import tempfile
+ import threading
+ import time
  from pathlib import Path
  from typing import Any

@@ -22,6 +25,72 @@ class FileOutputManager:
      Manages file output for analysis results with automatic extension detection
      and security validation.
      """
+
+     # Class-level set that suppresses duplicate warning messages (per process only)
+     _warning_messages_shown = set()
+     # Lock for mutual exclusion between threads
+     _warning_lock = threading.Lock()
+
+     # File-based duplicate suppression shared across processes
+     @staticmethod
+     def _get_warning_lock_file(warning_key: str) -> Path:
+         """Get the lock file path for a warning message."""
+         temp_dir = Path(tempfile.gettempdir())
+         safe_key = warning_key.replace("/", "_").replace(":", "_").replace("\\", "_")
+         return temp_dir / f"tree_sitter_analyzer_warning_{safe_key}.lock"
+
+     @staticmethod
+     def _should_show_warning(warning_key: str, max_age_seconds: int = 300) -> bool:
+         """
+         Decide across processes whether a warning may be shown.
+
+         Args:
+             warning_key: Warning key
+             max_age_seconds: Lifetime of the lock file, in seconds
+
+         Returns:
+             Whether the warning should be shown
+         """
+         # Mutual exclusion between threads
+         with FileOutputManager._warning_lock:
+             # Duplicate check within this process
+             if warning_key in FileOutputManager._warning_messages_shown:
+                 return False
+
+             # Duplicate check across processes
+             lock_file = FileOutputManager._get_warning_lock_file(warning_key)
+
+             try:
+                 # Skip the warning if the lock file exists and is still fresh
+                 if lock_file.exists():
+                     mtime = lock_file.stat().st_mtime
+                     if time.time() - mtime < max_age_seconds:
+                         FileOutputManager._warning_messages_shown.add(warning_key)
+                         return False
+                     else:
+                         # Delete the expired lock file
+                         try:
+                             lock_file.unlink()
+                         except (OSError, FileNotFoundError):
+                             pass
+
+                 # Create the lock file exclusively to claim the right to warn;
+                 # mode 'x' raises FileExistsError if the file already exists
+                 try:
+                     with open(lock_file, 'x') as f:
+                         f.write(str(time.time()))
+                     FileOutputManager._warning_messages_shown.add(warning_key)
+                     return True
+                 except FileExistsError:
+                     # Another process acquired the lock first
+                     FileOutputManager._warning_messages_shown.add(warning_key)
+                     return False
+
+             except (OSError, IOError):
+                 # If file operations fail, fall back to
+                 # in-process-only duplicate suppression
+                 FileOutputManager._warning_messages_shown.add(warning_key)
+                 return True

      def __init__(self, project_root: str | None = None):
          """
@@ -51,7 +120,11 @@

          # Priority 3: Current working directory as fallback
          self._output_path = str(Path.cwd())
-         logger.warning(f"Using current directory as output path: {self._output_path}")
+
+         # Prevent duplicate warnings across processes
+         warning_key = f"fallback_path:{self._output_path}"
+         if self._should_show_warning(warning_key):
+             logger.warning(f"Using current directory as output path: {self._output_path}")

      def get_output_path(self) -> str:
          """
@@ -308,6 +308,15 @@ class SearchCache:
              "word": params.get("word", False),
              "multiline": params.get("multiline", False),
              "max_filesize": params.get("max_filesize", ""),
+             # Include output format parameters in cache key
+             "total_only": params.get("total_only", False),
+             "count_only_matches": params.get("count_only_matches", False),
+             "summary_only": params.get("summary_only", False),
+             "group_by_file": params.get("group_by_file", False),
+             "optimize_paths": params.get("optimize_paths", False),
+             "max_count": params.get("max_count", None),
+             "context_before": params.get("context_before", None),
+             "context_after": params.get("context_after", None),
          }

          # Create deterministic key
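The dict above only normalizes the parameters; the diff does not show how the "deterministic key" is derived from it. One conventional derivation, offered purely as an assumption about what SearchCache might do, is a sorted JSON dump fed to a hash:

```python
import hashlib
import json

def make_cache_key(normalized_params: dict) -> str:
    # Assumption: SearchCache's actual key derivation is not shown in this diff.
    # sort_keys makes the serialization order-independent, so identical parameter
    # sets (including the new output format flags) always map to the same key.
    payload = json.dumps(normalized_params, sort_keys=True, separators=(",", ":"))
    return hashlib.sha256(payload.encode("utf-8")).hexdigest()
```

Whatever the exact scheme, the point of this hunk is that total_only and the other format flags now participate in the key, so a cached full result can no longer be served to a total_only request.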
@@ -14,191 +14,29 @@ from functools import wraps
  from pathlib import Path
  from typing import Any

+ # Import the new unified logging manager
+ from .logging_manager import get_logger_manager, SafeStreamHandler
+

- # Configure global logger
  def setup_logger(
      name: str = "tree_sitter_analyzer", level: int | str = logging.WARNING
  ) -> logging.Logger:
-     """Setup unified logger for the project"""
-     # Handle string level parameter
-     if isinstance(level, str):
-         level_upper = level.upper()
-         if level_upper == "DEBUG":
-             level = logging.DEBUG
-         elif level_upper == "INFO":
-             level = logging.INFO
-         elif level_upper == "WARNING":
-             level = logging.WARNING
-         elif level_upper == "ERROR":
-             level = logging.ERROR
-         else:
-             level = logging.WARNING  # Default fallback
-
-     # Get log level from environment variable (only if set and not empty)
-     env_level = os.environ.get("LOG_LEVEL", "").upper()
-     if env_level and env_level in ["DEBUG", "INFO", "WARNING", "ERROR"]:
-         if env_level == "DEBUG":
-             level = logging.DEBUG
-         elif env_level == "INFO":
-             level = logging.INFO
-         elif env_level == "WARNING":
-             level = logging.WARNING
-         elif env_level == "ERROR":
-             level = logging.ERROR
-     # If env_level is empty or not recognized, use the passed level parameter
-
-     logger = logging.getLogger(name)
-
-     # Clear existing handlers if this is a test logger to ensure clean state
-     if name.startswith("test_"):
-         logger.handlers.clear()
-
-     # Initialize file logging variables at function scope
-     enable_file_log = (
-         os.environ.get("TREE_SITTER_ANALYZER_ENABLE_FILE_LOG", "").lower() == "true"
-     )
-     file_log_level = level  # Default to main logger level
-
-     if not logger.handlers:  # Avoid duplicate handlers
-         # Create a safe handler that writes to stderr to avoid breaking MCP stdio
-         handler = SafeStreamHandler()
-         formatter = logging.Formatter(
-             "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
-         )
-         handler.setFormatter(formatter)
-         logger.addHandler(handler)
-
-         # Optional file logging for debugging when launched by clients (e.g., Cursor)
-         # This helps diagnose cases where stdio is captured by the client and logs are hidden.
-         # Only enabled when TREE_SITTER_ANALYZER_ENABLE_FILE_LOG is set to 'true'
-         if enable_file_log:
-             try:
-                 # Determine log directory
-                 log_dir = os.environ.get("TREE_SITTER_ANALYZER_LOG_DIR")
-                 if log_dir:
-                     # Use specified directory
-                     log_path = Path(log_dir) / "tree_sitter_analyzer.log"
-                     # Ensure directory exists
-                     Path(log_dir).mkdir(parents=True, exist_ok=True)
-                 else:
-                     # Use system temporary directory
-                     temp_dir = tempfile.gettempdir()
-                     log_path = Path(temp_dir) / "tree_sitter_analyzer.log"
-
-                 # Determine file log level
-                 file_log_level_str = os.environ.get(
-                     "TREE_SITTER_ANALYZER_FILE_LOG_LEVEL", ""
-                 ).upper()
-                 if file_log_level_str and file_log_level_str in [
-                     "DEBUG",
-                     "INFO",
-                     "WARNING",
-                     "ERROR",
-                 ]:
-                     if file_log_level_str == "DEBUG":
-                         file_log_level = logging.DEBUG
-                     elif file_log_level_str == "INFO":
-                         file_log_level = logging.INFO
-                     elif file_log_level_str == "WARNING":
-                         file_log_level = logging.WARNING
-                     elif file_log_level_str == "ERROR":
-                         file_log_level = logging.ERROR
-                 else:
-                     # Use same level as main logger
-                     file_log_level = level
-
-                 file_handler = logging.FileHandler(str(log_path), encoding="utf-8")
-                 file_handler.setFormatter(formatter)
-                 file_handler.setLevel(file_log_level)
-                 logger.addHandler(file_handler)
-
-                 # Log the file location for debugging purposes
-                 if hasattr(sys, "stderr") and hasattr(sys.stderr, "write"):
-                     try:
-                         sys.stderr.write(
-                             f"[logging_setup] File logging enabled: {log_path}\n"
-                         )
-                     except Exception:
-                         ...
-
-             except Exception as e:
-                 # Never let logging configuration break runtime behavior; log to stderr if possible
-                 if hasattr(sys, "stderr") and hasattr(sys.stderr, "write"):
-                     try:
-                         sys.stderr.write(
-                             f"[logging_setup] file handler init skipped: {e}\n"
-                         )
-                     except Exception:
-                         ...
-
-     # Set the logger level to the minimum of main level and file log level
-     # This ensures that all messages that should go to any handler are processed
-     final_level = level
-     if enable_file_log:
-         # Use the minimum level to ensure all messages reach their intended handlers
-         final_level = min(level, file_log_level)
-
-     logger.setLevel(final_level)
-
-     # For test loggers, ensure they don't inherit from parent and force level
-     if logger.name.startswith("test_"):
-         logger.propagate = False
-         # Force the level setting for test loggers
-         logger.level = level
-
-     return logger
-
-
- class SafeStreamHandler(logging.StreamHandler):
      """
-     A StreamHandler that safely handles closed streams
+     Setup unified logger for the project using LoggerManager
+
+     This function now delegates to the LoggerManager for unified logging
+     while maintaining backward compatibility with the existing API.
+
+     Args:
+         name: Logger name
+         level: Log level (string or int)
+
+     Returns:
+         Configured logger instance
      """
-
-     def __init__(self, stream=None):
-         # Default to sys.stderr to keep stdout clean for MCP stdio
-         super().__init__(stream if stream is not None else sys.stderr)
-
-     def emit(self, record: Any) -> None:
-         """
-         Emit a record, safely handling closed streams and pytest capture
-         """
-         try:
-             # Check if stream is closed before writing
-             if hasattr(self.stream, "closed") and self.stream.closed:
-                 return
-
-             # Check if we can write to the stream
-             if not hasattr(self.stream, "write"):
-                 return
-
-             # Special handling for pytest capture scenarios
-             # Check if this is a pytest capture stream that might be problematic
-             stream_name = getattr(self.stream, "name", "")
-             if stream_name is None or "pytest" in str(type(self.stream)).lower():
-                 # For pytest streams, be extra cautious
-                 try:
-                     # Just try to emit without any pre-checks
-                     super().emit(record)
-                     return
-                 except (ValueError, OSError, AttributeError, UnicodeError):
-                     return
-
-             # Additional safety checks for stream validity for non-pytest streams
-             try:
-                 # Test if we can actually write to the stream without flushing
-                 # Avoid flush() as it can cause "I/O operation on closed file" in pytest
-                 if hasattr(self.stream, "writable") and not self.stream.writable():
-                     return
-             except (ValueError, OSError, AttributeError, UnicodeError):
-                 return
-
-             super().emit(record)
-         except (ValueError, OSError, AttributeError, UnicodeError):
-             # Silently ignore I/O errors during shutdown or pytest capture
-             pass
-         except Exception:
-             # For any other unexpected errors, silently ignore to prevent test failures
-             pass
+     # Use the unified logger manager
+     logger_manager = get_logger_manager()
+     return logger_manager.get_logger(name, level)


  def setup_safe_logging_shutdown() -> None:
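The public signature of setup_logger is unchanged, so existing call sites keep working; only the body now routes through LoggerManager. A usage sketch (the import path is assumed; the diff shows only the module's contents):

```python
import logging

from tree_sitter_analyzer.utils import setup_logger  # import path assumed

# String levels still work exactly as before...
logger = setup_logger("tree_sitter_analyzer.demo", "DEBUG")
logger.debug("now routed through the unified LoggerManager")

# ...and so do int levels.
logger = setup_logger("tree_sitter_analyzer.demo", logging.INFO)
```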
@@ -369,17 +207,18 @@ def safe_print(message: str | None, level: str = "info", quiet: bool = False) ->


  def create_performance_logger(name: str) -> logging.Logger:
-     """Create performance-focused logger"""
-     perf_logger = logging.getLogger(f"{name}.performance")
-
-     if not perf_logger.handlers:
-         handler = SafeStreamHandler()
-         formatter = logging.Formatter("%(asctime)s - PERF - %(message)s")
-         handler.setFormatter(formatter)
-         perf_logger.addHandler(handler)
-         perf_logger.setLevel(logging.DEBUG)  # Change to DEBUG level
-
-     return perf_logger
+     """
+     Create performance-focused logger using unified LoggerManager
+
+     Args:
+         name: Base name for the performance logger
+
+     Returns:
+         Configured performance logger
+     """
+     logger_manager = get_logger_manager()
+     perf_logger_name = f"{name}.performance"
+     return logger_manager.get_logger(perf_logger_name, logging.DEBUG)


  # Performance logger instance
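create_performance_logger likewise keeps its contract: the returned logger is still named "<name>.performance" and pinned at DEBUG, now obtained from the shared manager. For example:

```python
# Assumes create_performance_logger (defined above) is in scope.
perf_logger = create_performance_logger("tree_sitter_analyzer")

# Same naming convention as the old implementation: "<name>.performance" at
# DEBUG, so existing log_performance() calls keep flowing through it.
perf_logger.debug("parse completed in 12.4 ms")
```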
@@ -402,7 +241,7 @@ def log_performance(
          else:
              detail_str = str(details)
          message += f" - {detail_str}"
-         perf_logger.debug(message)  # Change to DEBUG level
+         perf_logger.debug(message)
      except (ValueError, OSError) as e:
          if hasattr(sys, "stderr") and hasattr(sys.stderr, "write"):
              try:
@@ -412,18 +251,14 @@


  def setup_performance_logger() -> logging.Logger:
-     """Set up performance logging"""
-     perf_logger = logging.getLogger("performance")
-
-     # Add handler if not already configured
-     if not perf_logger.handlers:
-         handler = SafeStreamHandler()
-         formatter = logging.Formatter("%(asctime)s - Performance - %(message)s")
-         handler.setFormatter(formatter)
-         perf_logger.addHandler(handler)
-         perf_logger.setLevel(logging.INFO)
-
-     return perf_logger
+     """
+     Set up performance logging (unified with create_performance_logger)
+
+     Returns:
+         Performance logger instance
+     """
+     # Delegate to the unified create_performance_logger
+     return create_performance_logger("performance")


  class LoggingContext: