claude-self-reflect 3.3.1 → 4.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -135,13 +135,15 @@ def get_status() -> dict:
135
135
  # The actual structure has imported_files at the top level
136
136
  imported_files = data.get("imported_files", {})
137
137
 
138
- # Count all files in imported_files object (they are all fully imported)
138
+ # Count all files in imported_files object (only if they still exist on disk)
139
139
  for file_path in imported_files.keys():
140
140
  normalized_path = normalize_file_path(file_path)
141
141
  if normalized_path in file_to_project and normalized_path not in counted_files:
142
- project_name = file_to_project[normalized_path]
143
- project_stats[project_name]["indexed"] += 1
144
- counted_files.add(normalized_path)
142
+ # Verify file actually exists before counting it as indexed
143
+ if Path(normalized_path).exists():
144
+ project_name = file_to_project[normalized_path]
145
+ project_stats[project_name]["indexed"] += 1
146
+ counted_files.add(normalized_path)
145
147
 
146
148
  # Also check file_metadata for partially imported files
147
149
  file_metadata = data.get("file_metadata", {})
@@ -180,14 +182,17 @@ def get_status() -> dict:
180
182
  with open(watcher_state_file, 'r') as f:
181
183
  watcher_data = json.load(f)
182
184
 
183
- # Count files imported by the watcher
185
+ # Count files imported by the watcher (only if they still exist on disk)
184
186
  watcher_imports = watcher_data.get("imported_files", {})
185
187
  for file_path in watcher_imports.keys():
186
188
  normalized_path = normalize_file_path(file_path)
189
+ # CRITICAL: Only count if file exists on disk AND is in our project list
187
190
  if normalized_path in file_to_project and normalized_path not in counted_files:
188
- project_name = file_to_project[normalized_path]
189
- project_stats[project_name]["indexed"] += 1
190
- counted_files.add(normalized_path)
191
+ # Verify file actually exists before counting it as indexed
192
+ if Path(normalized_path).exists():
193
+ project_name = file_to_project[normalized_path]
194
+ project_stats[project_name]["indexed"] += 1
195
+ counted_files.add(normalized_path)
191
196
  except (json.JSONDecodeError, KeyError, OSError):
192
197
  # If watcher file is corrupted or unreadable, continue
193
198
  pass
@@ -0,0 +1,286 @@
1
"""Ultra-fast status checker using unified state management.

This module reads from the unified state file for indexing status.
Designed for <20ms execution time to support status bars and shell scripts.
"""

import json
import os
import sys
import time
from collections import defaultdict
from pathlib import Path

# Add scripts directory to path for unified state manager
scripts_dir = Path(__file__).parent.parent.parent / "scripts"
if scripts_dir.exists():
    sys.path.insert(0, str(scripts_dir))

try:
    from unified_state_manager import UnifiedStateManager
except ImportError:
    # Fallback to reading JSON directly if manager not available
    UnifiedStateManager = None

# Try to import shared utilities
try:
    from shared_utils import (
        extract_project_name_from_path,
        get_claude_projects_dir,
        get_csr_config_dir
    )
except ImportError:
    # Fallback implementations, kept behaviorally in sync with shared_utils.

    def extract_project_name_from_path(file_path: str) -> str:
        """Extract project name from a JSONL file path.

        Claude stores sessions under flattened directory names such as
        "-Users-me-projects-myapp"; everything after the "projects"
        segment is treated as the project name.
        """
        path_obj = Path(file_path)
        dir_name = path_obj.parent.name

        if dir_name.startswith('-') and 'projects' in dir_name:
            parts = dir_name.split('-')
            try:
                projects_idx = parts.index('projects')
                if projects_idx + 1 < len(parts):
                    project_parts = parts[projects_idx + 1:]
                    return '-'.join(project_parts)
            except ValueError:
                # 'projects' only matched as a substring of another
                # segment; fall through to the generic case below.
                pass

        return dir_name.lstrip('-')

    def get_claude_projects_dir() -> Path:
        """Get Claude projects directory (overridable via CLAUDE_PROJECTS_DIR)."""
        if 'CLAUDE_PROJECTS_DIR' in os.environ:
            return Path(os.environ['CLAUDE_PROJECTS_DIR'])
        return Path.home() / ".claude" / "projects"

    def get_csr_config_dir() -> Path:
        """Get CSR config directory (overridable via CSR_CONFIG_DIR)."""
        if 'CSR_CONFIG_DIR' in os.environ:
            return Path(os.environ['CSR_CONFIG_DIR'])
        return Path.home() / '.claude-self-reflect' / 'config'
63
+
64
+
65
def get_watcher_status() -> dict:
    """Get streaming watcher status from unified state.

    Returns:
        dict with keys: running (bool), status (str), and — when state is
        available — files_processed (int) and last_update_seconds (int|None).
        Never raises; any failure is reported as an error status instead.
    """
    try:
        if UnifiedStateManager:
            manager = UnifiedStateManager()
            state = manager.read_state()

            # Get watcher status from importers section
            watcher_info = state.get("importers", {}).get("streaming", {})
            last_run = watcher_info.get("last_run")

            if last_run:
                from datetime import datetime, timezone
                last_run_dt = datetime.fromisoformat(last_run)
                # BUG FIX: a naive ISO timestamp would raise TypeError when
                # subtracted from an aware "now" (the broad except below then
                # masked real status as an error). Treat naive values as UTC.
                if last_run_dt.tzinfo is None:
                    last_run_dt = last_run_dt.replace(tzinfo=timezone.utc)
                now = datetime.now(timezone.utc)
                age_seconds = (now - last_run_dt).total_seconds()
                is_active = age_seconds < 120  # Active if updated in last 2 minutes
            else:
                is_active = False
                age_seconds = float('inf')

            return {
                "running": is_active,
                "files_processed": watcher_info.get("files_processed", 0),
                "last_update_seconds": int(age_seconds) if age_seconds != float('inf') else None,
                "status": "🟢 active" if is_active else "🔴 inactive"
            }
        else:
            # Fallback to old method if UnifiedStateManager not available
            watcher_state_file = get_csr_config_dir() / "csr-watcher.json"

            if not watcher_state_file.exists():
                return {"running": False, "status": "not configured"}

            with open(watcher_state_file) as f:
                state = json.load(f)

            # Use the state file's mtime as a liveness heartbeat.
            file_age = time.time() - watcher_state_file.stat().st_mtime
            is_active = file_age < 120

            return {
                "running": is_active,
                "files_processed": len(state.get("imported_files", {})),
                "last_update_seconds": int(file_age),
                "status": "🟢 active" if is_active else "🔴 inactive"
            }
    except Exception as e:
        # A status probe must never crash a status bar; report and move on.
        return {"running": False, "status": f"error: {str(e)[:50]}"}
113
+
114
+
115
def get_status() -> dict:
    """Get indexing status from unified state with per-project breakdown.

    Returns:
        dict: JSON structure with overall and per-project indexing status
    """
    started = time.time()

    try:
        if not UnifiedStateManager:
            # Unified state unavailable — fall back to the multi-file scan.
            return get_status_legacy()

        manager = UnifiedStateManager()
        overall = manager.get_status()

        # Per-project totals come from the JSONL conversations on disk.
        breakdown = defaultdict(lambda: {"indexed": 0, "total": 0})
        projects_root = get_claude_projects_dir()
        if projects_root.exists():
            for conversation in projects_root.glob("**/*.jsonl"):
                owner = extract_project_name_from_path(str(conversation))
                breakdown[owner]["total"] += 1

        # Indexed counts come from the unified state's per-file records;
        # only projects that still have files on disk are credited.
        state = manager.read_state()
        for tracked_path, meta in state.get("files", {}).items():
            if meta.get("status") != "completed":
                continue
            owner = extract_project_name_from_path(tracked_path)
            if owner in breakdown:
                breakdown[owner]["indexed"] += 1

        return {
            "overall": {
                "percentage": overall["percentage"],
                "indexed_files": overall["indexed_files"],
                "total_files": overall["total_files"],
                "total_chunks": overall["total_chunks"],
            },
            "watcher": get_watcher_status(),
            "projects": dict(breakdown),
            "execution_time_ms": round((time.time() - started) * 1000, 2)
        }

    except Exception as e:
        return {
            "error": str(e),
            "execution_time_ms": round((time.time() - started) * 1000, 2)
        }
171
+
172
+
173
def get_status_legacy() -> dict:
    """Legacy status method reading from multiple files (fallback)."""
    stats = defaultdict(lambda: {"indexed": 0, "total": 0})

    # Totals: every JSONL conversation file on disk, grouped by project.
    root = get_claude_projects_dir()
    if root.exists():
        for conversation in root.glob("**/*.jsonl"):
            stats[extract_project_name_from_path(str(conversation))]["total"] += 1

    # Indexed counts: best-effort read of the importer's state file.
    state_path = get_csr_config_dir() / "imported-files.json"
    if state_path.exists():
        try:
            with open(state_path, 'r') as handle:
                recorded = json.load(handle).get("imported_files", {})

            for raw_path in recorded:
                # Container-side paths start with /logs/; remap to host dir.
                if raw_path.startswith("/logs/"):
                    local_path = raw_path.replace(
                        "/logs/", str(get_claude_projects_dir()) + "/", 1)
                else:
                    local_path = raw_path

                # Only count files that still exist on disk.
                if Path(local_path).exists():
                    owner = extract_project_name_from_path(local_path)
                    if owner in stats:
                        stats[owner]["indexed"] += 1
        except Exception:
            # Corrupt/unreadable state file: report zero indexed, don't crash.
            pass

    total = sum(entry["total"] for entry in stats.values())
    indexed = sum(entry["indexed"] for entry in stats.values())

    return {
        "overall": {
            "percentage": (indexed / max(total, 1)) * 100,
            "indexed_files": indexed,
            "total_files": total
        },
        "watcher": get_watcher_status(),
        "projects": dict(stats)
    }
225
+
226
+
227
def main() -> None:
    """CLI interface for status checking.

    Prints the status once, or repeatedly in --watch mode, as JSON or a
    short text summary depending on --format.
    """
    import argparse

    parser = argparse.ArgumentParser(description="Check Claude Self-Reflect indexing status")
    parser.add_argument("--format", choices=["json", "text"], default="json",
                        help="Output format (default: json)")
    parser.add_argument("--watch", action="store_true",
                        help="Watch mode - update every 2 seconds")

    args = parser.parse_args()

    if args.watch:
        # Poll loop: refresh every 2 seconds until Ctrl-C.
        try:
            while True:
                status = get_status()
                if args.format == "json":
                    print(json.dumps(status, indent=2))
                else:
                    overall = status.get("overall", {})
                    print(f"Indexing: {overall.get('percentage', 0):.1f}% "
                          f"({overall.get('indexed_files', 0)}/{overall.get('total_files', 0)})")

                    watcher = status.get("watcher", {})
                    print(f"Watcher: {watcher.get('status', '🔴 inactive')}")

                    if status.get("execution_time_ms"):
                        print(f"Time: {status['execution_time_ms']}ms")

                # Visual separator between refresh cycles.
                print("\n" + "-" * 40)
                time.sleep(2)

        except KeyboardInterrupt:
            print("\nStopped")
    else:
        # One-shot report.
        status = get_status()
        if args.format == "json":
            print(json.dumps(status, indent=2))
        else:
            overall = status.get("overall", {})
            print(f"Indexing: {overall.get('percentage', 0):.1f}% "
                  f"({overall.get('indexed_files', 0)}/{overall.get('total_files', 0)} files)")

            watcher = status.get("watcher", {})
            print(f"Watcher: {watcher.get('status', '🔴 inactive')}")

            # Show per-project if available
            projects = status.get("projects", {})
            if projects:
                print("\nProjects:")
                for proj, stats in projects.items():
                    pct = (stats["indexed"] / max(stats["total"], 1)) * 100
                    print(f"  {proj}: {pct:.1f}% ({stats['indexed']}/{stats['total']})")

            if status.get("execution_time_ms"):
                print(f"\nExecution time: {status['execution_time_ms']}ms")


if __name__ == "__main__":
    main()
@@ -0,0 +1,153 @@
1
#!/usr/bin/env python3
"""
Test fixture for the quality-fixer agent.

Originally written with intentional quality issues (sync file I/O, module
globals, print statements, one oversized function); the current version
demonstrates the fixed patterns instead:
- async file operations via aiofiles
- state held in ConfigManager rather than globals
- logging instead of print statements
- small single-purpose functions
"""

import asyncio
import json
import logging
import os
from typing import Any, Dict, List

import aiofiles

# Module-level logger instead of print statements
logger = logging.getLogger(__name__)
20
+
21
# State container replacing module-level globals
class ConfigManager:
    """Holds the loaded configuration and a running operation counter."""

    def __init__(self):
        # Populated by load_config(); None until a config has been read.
        self.config = None
        # Incremented by save/processing operations to track work done.
        self.counter = 0
26
+
27
async def load_config(config_manager: ConfigManager,
                      path: str = "config.json") -> Dict[str, Any]:
    """Load a JSON config file asynchronously into the manager.

    Args:
        config_manager: State holder that receives the parsed config.
        path: Config file to read (defaults to "config.json" for
            backward compatibility with existing callers).

    Returns:
        The parsed configuration dict.

    Raises:
        FileNotFoundError: If the config file does not exist.
        json.JSONDecodeError: If the file is not valid JSON.
    """
    # Non-blocking read so the event loop stays responsive.
    async with aiofiles.open(path, "r") as f:
        content = await f.read()
    config_manager.config = json.loads(content)

    logger.info(f"Config loaded: {config_manager.config}")
    return config_manager.config
36
+
37
async def save_data(data: Dict[str, Any], config_manager: ConfigManager,
                    path: str = "data.json") -> None:
    """Serialize *data* to JSON asynchronously and bump the save counter.

    Args:
        data: JSON-serializable payload to persist.
        config_manager: State holder whose counter tracks save operations.
        path: Destination file (defaults to "data.json" for backward
            compatibility with existing callers).
    """
    config_manager.counter += 1

    # Non-blocking write via aiofiles.
    async with aiofiles.open(path, "w") as f:
        await f.write(json.dumps(data))

    logger.info(f"Data saved, counter: {config_manager.counter}")
46
+
47
def validate_items(items: List[str]) -> List[str]:
    """Return the truthy items, logging a warning for each rejected one."""
    accepted = []
    for candidate in items:
        if candidate:
            accepted.append(candidate)
        else:
            logger.warning(f"Invalid item: {candidate}")
    return accepted
56
+
57
def process_items(items: List[str]) -> List[str]:
    """Upper-case every item."""
    return list(map(str.upper, items))
60
+
61
def filter_results(results: List[str]) -> List[str]:
    """Keep only results longer than three characters."""
    return list(filter(lambda text: len(text) > 3, results))
64
+
65
def create_summary(items: List[str], results: List[str], filtered: List[str]) -> Dict[str, int]:
    """Summarize how many items survived each processing stage."""
    stages = zip(("total", "processed", "filtered"), (items, results, filtered))
    return {stage: len(seq) for stage, seq in stages}
72
+
73
async def save_results(filtered: List[str]) -> None:
    """Write each result on its own line to results.txt, asynchronously."""
    async with aiofiles.open("results.txt", "w") as sink:
        for line in filtered:
            await sink.write(f"{line}\n")
78
+
79
async def process_items_improved(items: List[str], config_manager: ConfigManager) -> Dict[str, Any]:
    """Run the full pipeline: validate, transform, filter, persist, report."""
    # Pipeline stages, each delegated to a small, testable helper.
    valid_items = validate_items(items)
    results = process_items(valid_items)
    filtered = filter_results(results)
    filtered.sort()

    # Summarize and record what happened.
    summary = create_summary(items, results, filtered)
    logger.info(f"Processing complete: {summary}")

    # Persist the output, then account for the processed entries.
    await save_results(filtered)
    config_manager.counter += len(filtered)

    return {
        "summary": summary,
        "results": filtered,
        "counter": config_manager.counter
    }
113
+
114
async def debug_function() -> None:
    """Emit debug log entries, echoing debug.log contents when present."""
    logger.debug("Debug: Starting function")

    # Only attempt the read when the log file is actually there.
    if os.path.exists("debug.log"):
        async with aiofiles.open("debug.log", "r") as source:
            log_data = await source.read()
        logger.debug(f"Log data: {log_data}")

    logger.debug("Debug: Function complete")
125
+
126
# Using var instead of let/const (for JS patterns if analyzed)
# NOTE(review): intentionally JS-flavored bait for the quality-fixer agent;
# in Python this is just an ordinary module-level string constant.
var_example = "This would be flagged in JS"
128
+
129
async def main() -> None:
    """Drive the demo pipeline end to end with logging configured."""
    logging.basicConfig(level=logging.INFO)

    state = ConfigManager()
    logger.info("Starting application...")

    try:
        # These calls need real files on disk; missing files are tolerated
        # so the fixture can run in a bare test environment.
        await load_config(state)
        await process_items_improved(["test", "data", "example"], state)
        await debug_function()
    except FileNotFoundError:
        logger.warning("Required files not found - this is expected in test context")
    except Exception as e:
        logger.error(f"Application error: {e}")

    logger.info("Application complete!")
150
+
151
if __name__ == "__main__":
    # Run async main function on a fresh event loop.
    asyncio.run(main())
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "claude-self-reflect",
3
- "version": "3.3.1",
3
+ "version": "4.0.1",
4
4
  "description": "Give Claude perfect memory of all your conversations - Installation wizard for Python MCP server",
5
5
  "keywords": [
6
6
  "claude",
@@ -50,6 +50,9 @@ class FinalASTGrepAnalyzer:
50
50
  with open(file_path, 'r', encoding='utf-8') as f:
51
51
  content = f.read()
52
52
 
53
+ # Count lines of code for normalization
54
+ lines_of_code = len(content.splitlines())
55
+
53
56
  # Create SgRoot for the detected language
54
57
  sg_language = self._get_sg_language(language)
55
58
  root = sg.SgRoot(content, sg_language)
@@ -105,8 +108,8 @@ class FinalASTGrepAnalyzer:
105
108
  'error': str(e)[:200]
106
109
  })
107
110
 
108
- # Calculate quality score
109
- quality_score = self.registry.calculate_quality_score(all_matches)
111
+ # Calculate quality score with LOC normalization
112
+ quality_score = self.registry.calculate_quality_score(all_matches, loc=lines_of_code)
110
113
 
111
114
  # Count good vs bad patterns
112
115
  good_matches = [m for m in all_matches if m['quality'] == 'good']