claude-self-reflect 3.3.1 → 4.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -15,7 +15,7 @@ import fcntl
  import time
  import argparse
  from pathlib import Path
- from datetime import datetime
+ from datetime import datetime, timezone
  from typing import List, Dict, Any, Optional, Set
  import logging

@@ -34,6 +34,9 @@ except ImportError:
  scripts_dir = Path(__file__).parent
  sys.path.insert(0, str(scripts_dir))

+ # Import UnifiedStateManager
+ from unified_state_manager import UnifiedStateManager
+
  from qdrant_client import QdrantClient
  from qdrant_client.models import PointStruct, Distance, VectorParams

@@ -72,32 +75,15 @@ MAX_FILES_EDITED = 20
  MAX_TOOLS_USED = 15
  MAX_CONCEPT_MESSAGES = 50

- # Robust cross-platform state file resolution
- def get_default_state_file():
-     """Determine the default state file location with cross-platform support."""
-     from pathlib import Path
-
-     # Check if we're in Docker (more reliable than just checking /config)
-     docker_indicators = [
-         Path("/.dockerenv").exists(),  # Docker creates this file
-         os.path.exists("/config") and os.access("/config", os.W_OK)  # Mounted config dir with write access
-     ]
-
-     if any(docker_indicators):
-         return "/config/imported-files.json"
-
-     # Use pathlib for cross-platform home directory path
-     home_state = Path.home() / ".claude-self-reflect" / "config" / "imported-files.json"
-     return str(home_state)
-
- # Get state file path with env override support
+ # Initialize UnifiedStateManager
+ # Support legacy STATE_FILE environment variable
  env_state = os.getenv("STATE_FILE")
  if env_state:
-     # Normalize any user-provided path to absolute
      from pathlib import Path
-     STATE_FILE = str(Path(env_state).expanduser().resolve())
+     state_file_path = Path(env_state).expanduser().resolve()
+     state_manager = UnifiedStateManager(state_file_path)
  else:
-     STATE_FILE = get_default_state_file()
+     state_manager = UnifiedStateManager()  # Uses default location
  PREFER_LOCAL_EMBEDDINGS = os.getenv("PREFER_LOCAL_EMBEDDINGS", "true").lower() == "true"
  VOYAGE_API_KEY = os.getenv("VOYAGE_KEY")
  MAX_CHUNK_SIZE = int(os.getenv("MAX_CHUNK_SIZE", "50"))  # Messages per chunk
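
The hunk above keeps the legacy STATE_FILE override working but drops the Docker-aware get_default_state_file helper in favor of whatever default location UnifiedStateManager defines. A minimal sketch of the resolution in isolation, assuming only what the diff shows (the manager accepts an optional path and otherwise picks its own default):

    import os
    from pathlib import Path
    from unified_state_manager import UnifiedStateManager

    # Hypothetical override; any user-supplied path is normalized to absolute.
    os.environ["STATE_FILE"] = "~/.claude-self-reflect/config/imported-files.json"

    env_state = os.getenv("STATE_FILE")
    if env_state:
        state_manager = UnifiedStateManager(Path(env_state).expanduser().resolve())
    else:
        state_manager = UnifiedStateManager()  # manager chooses the default location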
@@ -335,22 +321,27 @@ def extract_metadata_single_pass(file_path: str) -> tuple[Dict[str, Any], str, i
          "concepts": [],
          "ast_elements": [],
          "has_code_blocks": False,
-         "total_messages": 0
+         "total_messages": 0,
+         "project_path": None  # Add project path from cwd
      }
-
+
      first_timestamp = None
      message_count = 0
      all_text = []
-
+
      try:
          with open(file_path, 'r', encoding='utf-8') as f:
              for line in f:
                  if not line.strip():
                      continue
-
+
                  try:
                      data = json.loads(line)
-
+
+                     # Extract cwd (current working directory) as project path
+                     if metadata["project_path"] is None and 'cwd' in data:
+                         metadata["project_path"] = data.get('cwd')
+
                      # Get timestamp from first valid entry
                      if first_timestamp is None and 'timestamp' in data:
                          first_timestamp = data.get('timestamp')
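
For context, each line of a conversation JSONL file is a standalone JSON object, and the extractor above lifts the first cwd value it sees into metadata["project_path"]. An illustrative line (keys taken from the code above, values invented for the example):

    import json

    line = '{"timestamp": "2025-01-01T12:00:00Z", "cwd": "/home/user/projects/demo"}'
    data = json.loads(line)
    data.get('cwd')        # -> "/home/user/projects/demo", stored as project_path
    data.get('timestamp')  # -> "2025-01-01T12:00:00Z", used as first_timestamp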
@@ -681,18 +672,13 @@ def stream_import_file(jsonl_file: Path, collection_name: str, project_path: Pat

     except Exception as e:
         logger.error(f"Failed to import {jsonl_file}: {e}")
+         # Mark file as failed in state manager
+         try:
+             state_manager.mark_file_failed(str(jsonl_file), str(e))
+         except Exception as state_error:
+             logger.warning(f"Could not mark file as failed in state: {state_error}")
         return 0

- def _locked_open(path, mode):
-     """Open file with exclusive lock for concurrent safety."""
-     f = open(path, mode)
-     try:
-         fcntl.flock(f.fileno(), fcntl.LOCK_EX)
-     except Exception:
-         f.close()
-         raise
-     return f
-
  def _with_retries(fn, attempts=3, base_sleep=0.5):
      """Execute function with retries and exponential backoff."""
      for i in range(attempts):
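
_with_retries survives the refactor as the importer's generic backoff wrapper: it retries fn up to attempts times, sleeping base_sleep * 2**i between tries (0.5s, 1s, 2s with the defaults). A hedged usage sketch; the Qdrant upsert target is plausible but not lifted from this file:

    # client, collection_name and points come from the surrounding importer code.
    _with_retries(
        lambda: client.upsert(collection_name=collection_name, points=points),
        attempts=3,
        base_sleep=0.5,
    )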
@@ -704,66 +690,78 @@ def _with_retries(fn, attempts=3, base_sleep=0.5):
              time.sleep(base_sleep * (2 ** i))
              logger.debug(f"Retrying after error: {e}")

- def load_state() -> dict:
-     """Load import state with file locking."""
-     if os.path.exists(STATE_FILE):
-         try:
-             with _locked_open(STATE_FILE, 'r') as f:
-                 return json.load(f)
-         except Exception as e:
-             logger.warning(f"Failed to load state: {e}")
-     return {"imported_files": {}}
-
- def save_state(state: dict):
-     """Save import state with atomic write."""
-     # Fix: Handle case where STATE_FILE has no directory component
-     state_dir = os.path.dirname(STATE_FILE)
-     if state_dir:
-         os.makedirs(state_dir, exist_ok=True)
-
-     # Use atomic write with locking to prevent corruption
-     temp_file = f"{STATE_FILE}.tmp"
-     with _locked_open(temp_file, 'w') as f:
-         json.dump(state, f, indent=2)
-         f.flush()
-         os.fsync(f.fileno())
-
-     # Atomic rename (on POSIX systems)
-     os.replace(temp_file, STATE_FILE)
-
- def should_import_file(file_path: Path, state: dict) -> bool:
-     """Check if file should be imported."""
-     file_str = str(file_path)
-     if file_str in state.get("imported_files", {}):
-         file_info = state["imported_files"][file_str]
-         last_modified = file_path.stat().st_mtime
-
-         # Check if file has been modified
-         if file_info.get("last_modified") != last_modified:
-             logger.info(f"File modified, will re-import: {file_path.name}")
-             return True
-
-         # Check for suspiciously low chunk counts (likely failed imports)
-         chunks = file_info.get("chunks", 0)
-         file_size_kb = file_path.stat().st_size / 1024
-
-         # Heuristic: Files > 10KB should have more than 2 chunks
-         if file_size_kb > 10 and chunks <= 2:
-             logger.warning(f"File has suspiciously low chunks ({chunks}) for size {file_size_kb:.1f}KB, will re-import: {file_path.name}")
-             return True
-
-         logger.info(f"Skipping unchanged file: {file_path.name}")
-         return False
-     return True
-
- def update_file_state(file_path: Path, state: dict, chunks: int):
-     """Update state for imported file."""
-     file_str = str(file_path)
-     state["imported_files"][file_str] = {
-         "imported_at": datetime.now().isoformat(),
-         "last_modified": file_path.stat().st_mtime,
-         "chunks": chunks
-     }
+ def should_import_file(file_path: Path) -> bool:
+     """Check if file should be imported using UnifiedStateManager."""
+     try:
+         # Get imported files from state manager
+         imported_files = state_manager.get_imported_files()
+
+         # Normalize the file path for comparison
+         normalized_path = state_manager.normalize_path(str(file_path))
+
+         if normalized_path in imported_files:
+             file_info = imported_files[normalized_path]
+
+             # Skip if file failed and we haven't reached retry limit
+             if file_info.get("status") == "failed" and file_info.get("retry_count", 0) >= 3:
+                 logger.info(f"Skipping failed file (max retries reached): {file_path.name}")
+                 return False
+
+             # Get file modification time for comparison
+             last_modified = file_path.stat().st_mtime
+             stored_modified = file_info.get("last_modified")
+
+             # Check if file has been modified (convert stored timestamp to float if needed)
+             if stored_modified:
+                 try:
+                     # Parse ISO timestamp to float for comparison
+                     stored_time = datetime.fromisoformat(stored_modified.replace("Z", "+00:00")).timestamp()
+                     if abs(last_modified - stored_time) > 1:  # Allow 1 second tolerance
+                         logger.info(f"File modified, will re-import: {file_path.name}")
+                         return True
+                 except (ValueError, TypeError):
+                     # If we can't parse the stored time, re-import to be safe
+                     logger.warning(f"Could not parse stored modification time, will re-import: {file_path.name}")
+                     return True
+
+             # Check for suspiciously low chunk counts (likely failed imports)
+             chunks = file_info.get("chunks", 0)
+             file_size_kb = file_path.stat().st_size / 1024
+
+             # Heuristic: Files > 10KB should have more than 2 chunks
+             if file_size_kb > 10 and chunks <= 2 and file_info.get("status") != "failed":
+                 logger.warning(f"File has suspiciously low chunks ({chunks}) for size {file_size_kb:.1f}KB, will re-import: {file_path.name}")
+                 return True
+
+             # Skip if successfully imported
+             if file_info.get("status") == "completed":
+                 logger.info(f"Skipping successfully imported file: {file_path.name}")
+                 return False
+
+         return True
+
+     except Exception as e:
+         logger.warning(f"Error checking import status for {file_path}: {e}")
+         return True  # Default to importing if we can't check status
+
+ def update_file_state(file_path: Path, chunks: int, collection_name: str):
+     """Update state for imported file using UnifiedStateManager."""
+     try:
+         # Determine embedding mode from collection suffix
+         embedding_mode = "local" if collection_suffix == "local" else "cloud"
+
+         # Add file to state manager
+         state_manager.add_imported_file(
+             file_path=str(file_path),
+             chunks=chunks,
+             importer="streaming",
+             collection=collection_name,
+             embedding_mode=embedding_mode,
+             status="completed"
+         )
+         logger.debug(f"Updated state for {file_path.name}: {chunks} chunks")
+     except Exception as e:
+         logger.error(f"Failed to update state for {file_path}: {e}")

  def main():
      """Main import function."""
@@ -793,9 +791,9 @@ def main():
          collection_suffix = "voyage"
          logger.info("Switched to Voyage AI embeddings (dimension: 1024)")

-     # Load state
-     state = load_state()
-     logger.info(f"Loaded state with {len(state.get('imported_files', {}))} previously imported files")
+     # Get status from state manager
+     status = state_manager.get_status()
+     logger.info(f"Loaded state with {status['indexed_files']} previously imported files")

      # Find all projects
      # Use LOGS_DIR env var, or fall back to Claude projects directory, then /logs for Docker
@@ -843,7 +841,7 @@ def main():
                  logger.info(f"Reached limit of {args.limit} files, stopping import")
                  break

-             if should_import_file(jsonl_file, state):
+             if should_import_file(jsonl_file):
                  chunks = stream_import_file(jsonl_file, collection_name, project_dir)
                  files_processed += 1
                  if chunks > 0:
@@ -863,8 +861,7 @@ def main():

                     if actual_count > 0:
                         logger.info(f"Verified {actual_count} points in Qdrant for {conversation_id}")
-                         update_file_state(jsonl_file, state, chunks)
-                         save_state(state)
+                         update_file_state(jsonl_file, chunks, collection_name)
                         total_imported += 1
                     else:
                         logger.error(f"No points found in Qdrant for {conversation_id} despite {chunks} chunks processed - not marking as imported")
@@ -878,6 +875,11 @@ def main():
                     # Critical fix: Don't mark files with 0 chunks as imported
                     # This allows retry on next run
                     logger.warning(f"File produced 0 chunks, not marking as imported: {jsonl_file.name}")
+                     # Mark as failed so we don't keep retrying indefinitely
+                     try:
+                         state_manager.mark_file_failed(str(jsonl_file), "File produced 0 chunks during import")
+                     except Exception as state_error:
+                         logger.warning(f"Could not mark file as failed in state: {state_error}")

      logger.info(f"Import complete: processed {total_imported} files")

@@ -106,7 +106,7 @@ class SessionQualityTracker:

          return edited_files

-     def analyze_session_quality(self, session_file: Optional[Path] = None) -> Dict[str, Any]:
+     def analyze_session_quality(self, session_file: Optional[Path] = None, use_tracker: bool = False) -> Dict[str, Any]:
          """
          Analyze code quality for all files edited in current session.
          Returns quality report with actionable insights.
@@ -114,6 +114,22 @@ class SessionQualityTracker:
          # Update patterns (uses cache, <100ms)
          check_and_update_patterns()

+         # Check for session edit tracker first (priority mode)
+         if use_tracker or (not session_file):
+             tracker_file = Path.home() / ".claude-self-reflect" / "current_session_edits.json"
+             if tracker_file.exists():
+                 try:
+                     with open(tracker_file, 'r') as f:
+                         tracker_data = json.load(f)
+                     edited_files = set(tracker_data.get('edited_files', []))
+                     if edited_files:
+                         logger.info(f"Using session tracker: {len(edited_files)} files edited in session")
+                         self.current_session_id = 'active_session'
+                         # Use Session scope label for tracked edits
+                         return self._analyze_files_with_scope(edited_files, scope_label='Session')
+                 except Exception as e:
+                     logger.debug(f"Error reading tracker file: {e}")
+
          # Find active session if not provided
          if not session_file:
              session_file = self.find_active_session()
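
The new tracker branch lets a hook short-circuit session discovery by writing ~/.claude-self-reflect/current_session_edits.json ahead of time. A minimal file the code above would accept, inferred from the read path (only the edited_files key is consulted; file names are examples from elsewhere in this diff):

    import json
    from pathlib import Path

    tracker_file = Path.home() / ".claude-self-reflect" / "current_session_edits.json"
    tracker_file.parent.mkdir(parents=True, exist_ok=True)
    tracker_file.write_text(json.dumps({
        "edited_files": ["scripts/streaming-watcher.py", "mcp-server/src/server.py"]
    }))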
@@ -188,7 +204,7 @@ class SessionQualityTracker:
                  'avg_quality_score': round(avg_quality, 3),
                  'total_issues': total_issues,
                  'total_good_patterns': total_good_patterns,
-                 'quality_grade': self._get_quality_grade(avg_quality)
+                 'quality_grade': self._get_quality_grade(avg_quality, total_issues)
              },
              'file_reports': file_reports,
              'actionable_items': self._generate_actionable_items(file_reports),
@@ -212,20 +228,92 @@ class SessionQualityTracker:
          top_issues.sort(key=lambda x: x['count'], reverse=True)
          return top_issues[:5]  # Top 5 issues

-     def _get_quality_grade(self, score: float) -> str:
-         """Convert quality score to letter grade."""
-         if score >= 0.9:
-             return 'A+'
-         elif score >= 0.8:
-             return 'A'
-         elif score >= 0.7:
-             return 'B'
-         elif score >= 0.6:
-             return 'C'
-         elif score >= 0.5:
-             return 'D'
-         else:
+     def _get_quality_grade(self, score: float, total_issues: int = 0) -> str:
+         """
+         Convert quality score to letter grade.
+         Based on consensus: issues should dominate grading.
+
+         Grade boundaries (adjusted for issue count):
+         - A+: score >= 0.97 AND issues <= 5
+         - A: score >= 0.93 AND issues <= 20
+         - B: score >= 0.83 AND issues <= 50
+         - C: score >= 0.73 AND issues <= 100
+         - D: score >= 0.60
+         - F: score < 0.60
+         """
+         # Hard caps based on issue count (industry standard)
+         if total_issues > 200:
              return 'F'
+         elif total_issues > 100:
+             # Many issues - max grade is C
+             if score >= 0.77:
+                 return 'C+'
+             elif score >= 0.73:
+                 return 'C'
+             elif score >= 0.70:
+                 return 'C-'
+             elif score >= 0.60:
+                 return 'D'
+             else:
+                 return 'F'
+         elif total_issues > 50:
+             # Moderate issues - max grade is B
+             if score >= 0.87:
+                 return 'B+'
+             elif score >= 0.83:
+                 return 'B'
+             elif score >= 0.80:
+                 return 'B-'
+             elif score >= 0.73:
+                 return 'C'
+             elif score >= 0.60:
+                 return 'D'
+             else:
+                 return 'F'
+         elif total_issues > 20:
+             # Some issues - max grade is A-
+             if score >= 0.90:
+                 return 'A-'
+             elif score >= 0.87:
+                 return 'B+'
+             elif score >= 0.83:
+                 return 'B'
+             elif score >= 0.73:
+                 return 'C'
+             elif score >= 0.60:
+                 return 'D'
+             else:
+                 return 'F'
+         elif total_issues > 5:
+             # Few issues - max grade is A
+             if score >= 0.93:
+                 return 'A'
+             elif score >= 0.90:
+                 return 'A-'
+             elif score >= 0.83:
+                 return 'B'
+             elif score >= 0.73:
+                 return 'C'
+             elif score >= 0.60:
+                 return 'D'
+             else:
+                 return 'F'
+         else:
+             # Very few issues (0-5) - can achieve A+
+             if score >= 0.97:
+                 return 'A+'
+             elif score >= 0.93:
+                 return 'A'
+             elif score >= 0.90:
+                 return 'A-'
+             elif score >= 0.83:
+                 return 'B'
+             elif score >= 0.73:
+                 return 'C'
+             elif score >= 0.60:
+                 return 'D'
+             else:
+                 return 'F'

      def _generate_actionable_items(self, file_reports: Dict) -> List[str]:
          """Generate actionable recommendations for the user."""
@@ -263,23 +351,101 @@ class SessionQualityTracker:

          return actions

+     def _analyze_files_with_scope(self, edited_files: set, scope_label: str = 'Session') -> Dict[str, Any]:
+         """
+         Analyze specific files with a given scope label.
+         Used for both session tracking and fallback modes.
+         """
+         # Analyze each edited file
+         file_reports = {}
+         total_issues = 0
+         total_good_patterns = 0
+         quality_scores = []
+
+         for file_path in edited_files:
+             # Only analyze code files
+             if any(str(file_path).endswith(ext) for ext in ['.py', '.ts', '.js', '.tsx', '.jsx']):
+                 try:
+                     result = self.analyzer.analyze_file(file_path)
+                     metrics = result['quality_metrics']
+
+                     file_reports[file_path] = {
+                         'quality_score': metrics['quality_score'],
+                         'good_patterns': metrics['good_patterns_found'],
+                         'issues': metrics['total_issues'],
+                         'recommendations': result.get('recommendations', [])[:3],  # Top 3
+                         'top_issues': self._get_top_issues(result)
+                     }
+
+                     total_issues += metrics['total_issues']
+                     total_good_patterns += metrics['good_patterns_found']
+                     quality_scores.append(metrics['quality_score'])
+
+                     # Track quality history
+                     if file_path not in self.quality_history:
+                         self.quality_history[file_path] = []
+                     self.quality_history[file_path].append({
+                         'timestamp': datetime.now().isoformat(),
+                         'score': metrics['quality_score']
+                     })
+
+                 except Exception as e:
+                     logger.error(f"Failed to analyze {file_path}: {e}")
+
+         if not file_reports:
+             return {
+                 'status': 'no_code_files',
+                 'session_id': self.current_session_id,
+                 'scope_label': scope_label,
+                 'message': 'No analyzable code files in session'
+             }
+
+         # Calculate session average
+         avg_quality = sum(quality_scores) / len(quality_scores) if quality_scores else 0
+
+         # Generate session report
+         return {
+             'status': 'success',
+             'session_id': self.current_session_id,
+             'scope_label': scope_label,  # Use provided scope label
+             'timestamp': datetime.now().isoformat(),
+             'summary': {
+                 'files_analyzed': len(file_reports),
+                 'avg_quality_score': round(avg_quality, 3),
+                 'total_issues': total_issues,
+                 'total_good_patterns': total_good_patterns,
+                 'quality_grade': self._get_quality_grade(avg_quality, total_issues)
+             },
+             'file_reports': file_reports,
+             'actionable_items': self._generate_actionable_items(file_reports),
+             'quality_trend': self._calculate_quality_trend()
+         }
+
      def analyze_recent_files(self) -> Dict[str, Any]:
          """Analyze core project files when no session is found."""
-         project_root = Path(__file__).parent.parent
-
-         # Define core project files to analyze (not test files)
-         core_files = [
-             "scripts/session_quality_tracker.py",
-             "scripts/cc-statusline-unified.py",
-             "scripts/pattern_registry_enhanced.py",
-             "scripts/simplified_metadata_extractor.py",
-             "scripts/streaming-watcher.py",
-             "scripts/quality-report.py",
-             "mcp-server/src/server.py",
-             "mcp-server/src/search_tools.py",
-             "mcp-server/src/temporal_tools.py",
-             "mcp-server/src/reflection_tools.py",
-         ]
+         # Use current working directory as project root
+         project_root = Path.cwd()
+
+         # Find code files in the project dynamically
+         code_extensions = {'.py', '.js', '.ts', '.jsx', '.tsx', '.java', '.cpp', '.c',
+                            '.h', '.hpp', '.rs', '.go', '.rb', '.php'}
+
+         core_files = []
+         # Look for code files in the project (limit to avoid too many files)
+         for ext in code_extensions:
+             files = list(project_root.rglob(f'*{ext}'))
+             # Filter out common non-source directories
+             files = [f for f in files if not any(
+                 skip in f.parts for skip in ['venv', '.venv', 'node_modules', '.git',
+                                              '__pycache__', '.pytest_cache', 'dist',
+                                              'build', 'target', '.idea', '.vscode']
+             )]
+             core_files.extend(files[:20])  # Take up to 20 files per extension
+             if len(core_files) >= 50:  # Increased limit to 50 files for better coverage
+                 break
+
+         # Convert to relative paths
+         core_files = [str(f.relative_to(project_root)) for f in core_files[:50]]

          edited_files = set()
          for file_path in core_files:
@@ -289,10 +455,7 @@ class SessionQualityTracker:

          # Also check for recently modified files (last 30 minutes) to catch actual work
          try:
-             # Validate project_root is within expected bounds
-             if not str(project_root.resolve()).startswith(str(Path(__file__).parent.parent.resolve())):
-                 logger.error("Security: Invalid project root path")
-                 return {}
+             # No need to validate project_root - we can analyze any project

              # Use pathlib instead of subprocess for safer file discovery
              scripts_dir = project_root / "scripts"
@@ -321,7 +484,7 @@ class SessionQualityTracker:
          total_good_patterns = 0
          quality_scores = []

-         for file_path in list(edited_files)[:10]:  # Limit to 10 files for performance
+         for file_path in list(edited_files)[:50]:  # Analyze up to 50 files for better coverage
              try:
                  result = self.analyzer.analyze_file(file_path)
                  metrics = result['quality_metrics']
@@ -367,7 +530,7 @@ class SessionQualityTracker:
                  'avg_quality_score': round(avg_quality, 3),
                  'total_issues': total_issues,
                  'total_good_patterns': total_good_patterns,
-                 'quality_grade': self._get_quality_grade(avg_quality)
+                 'quality_grade': self._get_quality_grade(avg_quality, total_issues)
              },
              'file_reports': file_reports,
              'actionable_items': self._generate_actionable_items(file_reports),
@@ -441,14 +604,14 @@ class SessionQualityTracker:
          return '\n'.join(report)


- def main():
+ def main(use_tracker=False):
      """Run session quality analysis."""
      tracker = SessionQualityTracker()

      logger.info("🔍 Analyzing current session code quality...")
      logger.info("")

-     analysis = tracker.analyze_session_quality()
+     analysis = tracker.analyze_session_quality(use_tracker=use_tracker)
      report = tracker.generate_report(analysis)

      logger.info(report)
@@ -456,8 +619,8 @@ def main():
      # Save report for watcher integration - PER PROJECT
      # Always save cache, even with fallback analysis
      if analysis.get('status') in ['success', 'fallback']:
-         # Get project name from current directory
-         project_name = os.path.basename(os.getcwd())
+         # Get project name from environment or current directory
+         project_name = os.environ.get('QUALITY_PROJECT_NAME', os.path.basename(os.getcwd()))
          # Secure sanitization with whitelist approach
          import re
          safe_project_name = re.sub(r'[^a-zA-Z0-9_-]', '_', project_name)[:100]
@@ -478,4 +641,21 @@ def main():


  if __name__ == "__main__":
-     main()
+     import argparse
+     parser = argparse.ArgumentParser(description='Analyze code quality for projects')
+     parser.add_argument('--project-path', help='Path to the project to analyze')
+     parser.add_argument('--project-name', help='Name of the project for cache file')
+     parser.add_argument('--use-tracker', action='store_true',
+                         help='Use session edit tracker for analysis')
+     args = parser.parse_args()
+
+     # If external project specified, change to that directory
+     if args.project_path:
+         os.chdir(args.project_path)
+
+     # Override project name if specified
+     if args.project_name:
+         # This will be used in the main() function for cache naming
+         os.environ['QUALITY_PROJECT_NAME'] = args.project_name
+
+     main(use_tracker=args.use_tracker)
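
With the new entry point the tracker can analyze any checkout, not just its own repository. An illustrative invocation (the script path follows the scripts/ layout referenced elsewhere in this diff; the project path is made up):

    python scripts/session_quality_tracker.py \
        --project-path ~/code/my-app \
        --project-name my-app \
        --use-tracker

--project-path simply changes directory before analysis, and --project-name feeds the QUALITY_PROJECT_NAME environment variable that names the per-project cache file.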