claude-self-reflect 3.3.1 → 4.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -55,9 +55,120 @@ def get_import_status():
     return "📚 CSR: Error"


+def categorize_issues(file_reports):
+    """
+    Categorize issues from AST analysis into critical/medium/low.
+    """
+    critical = 0
+    medium = 0
+    low = 0
+
+    for file_path, report in file_reports.items():
+        # Only use top_issues for accurate counting (avoid double-counting from recommendations)
+        for issue in report.get('top_issues', []):
+            severity = issue.get('severity', 'medium')
+            count = issue.get('count', 0)
+            issue_id = issue.get('id', '').lower()
+
+            if severity == 'high' or severity == 'critical':
+                critical += count
+            elif severity == 'medium':
+                # Console.log and print statements are low severity
+                if 'print' in issue_id or 'console' in issue_id:
+                    low += count
+                else:
+                    medium += count
+            else:
+                low += count
+
+    return critical, medium, low
+
+
+def get_quality_icon(critical=0, medium=0, low=0):
+    """
+    Determine quality icon based on issue severity counts.
+    """
+    # Icon selection based on highest severity present
+    if critical > 0:
+        if critical >= 10:
+            return "🔴"  # Red circle - Critical issues need immediate attention
+        else:
+            return "🟠"  # Orange circle - Some critical issues
+    elif medium > 0:
+        if medium >= 50:
+            return "🟡"  # Yellow circle - Many medium issues
+        else:
+            return "🟢"  # Green circle - Few medium issues
+    elif low > 0:
+        if low >= 100:
+            return "⚪"  # White circle - Many minor issues (prints)
+        else:
+            return "✅"  # Check mark - Only minor issues
+    else:
+        return "✨"  # Sparkles - Perfect, no issues
+
+
+def format_statusline_quality(critical=0, medium=0, low=0):
+    """
+    Format statusline with colored dot and labeled numbers.
+    """
+    import os
+    icon = get_quality_icon(critical, medium, low)
+
+    # Check if we should use colors (when in a TTY)
+    use_colors = os.isatty(sys.stdout.fileno()) if hasattr(sys.stdout, 'fileno') else False
+
+    # Build count display with colors if supported
+    counts = []
+    if critical > 0:
+        if use_colors:
+            # Use bright red for critical
+            counts.append(f"\033[1;31mC:{critical}\033[0m")
+        else:
+            counts.append(f"C:{critical}")
+    if medium > 0:
+        if use_colors:
+            # Use bright yellow for medium
+            counts.append(f"\033[1;33mM:{medium}\033[0m")
+        else:
+            counts.append(f"M:{medium}")
+    if low > 0:
+        if use_colors:
+            # Use bright white/gray for low
+            counts.append(f"\033[1;37mL:{low}\033[0m")
+        else:
+            counts.append(f"L:{low}")
+
+    if counts:
+        return f"{icon} {' '.join(counts)}"
+    else:
+        return f"{icon}"  # Perfect - no counts needed
+
+
 def get_session_health():
-    """Get cached session health."""
-    cache_file = Path.home() / ".claude-self-reflect" / "session_quality.json"
+    """Get cached session health with icon-based quality display."""
+    # Check for session edit tracker to show appropriate label
+    tracker_file = Path.home() / ".claude-self-reflect" / "current_session_edits.json"
+
+    # Get quality cache file for current project
+    project_name = Path.cwd().name
+    cache_file = Path.home() / ".claude-self-reflect" / "quality_cache" / f"{project_name}.json"
+
+    # Default label prefix
+    label_prefix = ""
+
+    # Check if we have a session tracker with edited files
+    if tracker_file.exists():
+        try:
+            with open(tracker_file, 'r') as f:
+                tracker_data = json.load(f)
+            edited_files = tracker_data.get('edited_files', [])
+            if edited_files:
+                # Show session label with file count
+                file_count = len(edited_files)
+                label_prefix = f"Session ({file_count} file{'s' if file_count > 1 else ''}): "
+        except:
+            pass

     if not cache_file.exists():
         # Fall back to import status if no health data
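For orientation, here is a minimal usage sketch of the three helpers added above. It is not part of the package; the `file_reports` shape simply mirrors the `top_issues` entries that `categorize_issues()` reads, and the file name and counts are invented.

```python
# Hypothetical report shaped like the quality cache consumed below
file_reports = {
    "app.py": {
        "top_issues": [
            {"id": "sql-injection", "severity": "high", "count": 2},
            {"id": "print-statement", "severity": "medium", "count": 12},  # demoted to low
        ]
    }
}

critical, medium, low = categorize_issues(file_reports)  # -> (2, 0, 12)
icon = get_quality_icon(critical, medium, low)           # -> "🟠" (0 < critical < 10)
print(format_statusline_quality(critical, medium, low))  # e.g. "🟠 C:2 L:12" (ANSI-colored on a TTY)
```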
@@ -79,22 +190,26 @@ def get_session_health():
             # Fall back to import status if no session
             return get_import_status()

-        summary = data['summary']
-        grade = summary['quality_grade']
-        issues = summary['total_issues']
+        # Extract issue counts by severity
+        file_reports = data.get('file_reports', {})
+        critical, medium, low = categorize_issues(file_reports)

-        # Color coding
-        if grade in ['A+', 'A']:
-            emoji = '🟢'
-        elif grade in ['B', 'C']:
-            emoji = '🟡'
-        else:
-            emoji = '🔴'
+        # Use the icon-based display with optional label
+        quality_display = format_statusline_quality(critical, medium, low)

-        if issues > 0:
-            return f"{emoji} Code: {grade} ({issues})"
-        else:
-            return f"{emoji} Code: {grade}"
+        # Add session label if we have one
+        if data.get('scope_label') == 'Session':
+            # For session scope, always show the label with counts
+            if label_prefix:
+                if critical == 0 and medium == 0 and low == 0:
+                    return f"{label_prefix}0 0 0 {quality_display}"
+                else:
+                    return f"{label_prefix}{critical} {medium} {low} {quality_display}"
+            else:
+                # Fallback if no tracker file
+                return f"Session: {critical} {medium} {low} {quality_display}"
+
+        return quality_display

     except Exception:
         return get_import_status()
@@ -257,6 +372,14 @@ def get_compact_status():
     cache_dir = Path.home() / ".claude-self-reflect" / "quality_cache"
     cache_file = cache_dir / f"{safe_project_name}.json"

+    # If the exact cache file doesn't exist, try to find one that ends with this project name
+    # This handles cases like "metafora-Atlas-gold.json" for project "Atlas-gold"
+    if not cache_file.exists():
+        # Look for files ending with the project name
+        possible_files = list(cache_dir.glob(f"*-{safe_project_name}.json"))
+        if possible_files:
+            cache_file = possible_files[0]  # Use the first match
+
     # Validate cache file path stays within cache directory
     if cache_file.exists() and not str(cache_file.resolve()).startswith(str(cache_dir.resolve())):
         # Security issue - return placeholder
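For illustration, the fallback resolves a prefixed cache file when the exact name is missing; the directory and the "metafora-Atlas-gold.json" case come from the comments above, while the match below is hypothetical.

```python
from pathlib import Path

# Project "Atlas-gold": no exact Atlas-gold.json, but a prefixed cache matches
cache_dir = Path.home() / ".claude-self-reflect" / "quality_cache"
matches = list(cache_dir.glob("*-Atlas-gold.json"))  # e.g. [.../metafora-Atlas-gold.json]
cache_file = matches[0] if matches else cache_dir / "Atlas-gold.json"
```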
@@ -273,30 +396,38 @@ def get_compact_status():
             mtime = datetime.fromtimestamp(cache_file.stat().st_mtime)
             age = datetime.now() - mtime

-            # Use quality data up to 24 hours old (more reasonable)
-            if age < timedelta(hours=24):
+            # Use quality data up to 30 minutes old for fresher results
+            if age < timedelta(minutes=30):
                 with open(cache_file, 'r') as f:
                     data = json.load(f)

-                if data.get('status') == 'success':
-                    summary = data['summary']
-                    grade = summary['quality_grade']
-                    issues = summary.get('total_issues', 0)
-                    scope = data.get('scope_label', 'Core')  # Get scope label
-
-                    # GPT-5 fix: Remove forced downgrades, trust the analyzer's grade
-                    # Grade should reflect actual quality metrics, not arbitrary thresholds
-
-                    # Pick emoji based on grade
-                    if grade in ['A+', 'A']:
-                        emoji = '🟢'
-                    elif grade in ['B', 'C']:
-                        emoji = '🟡'
+                if data.get('status') == 'non-code':
+                    # Non-code project - show documentation indicator
+                    grade_str = "[📚:Docs]"
+                    quality_valid = True
+                elif data.get('status') == 'success':
+                    # Extract issue counts by severity for icon display
+                    file_reports = data.get('file_reports', {})
+                    critical, medium, low = categorize_issues(file_reports)
+
+                    # Get icon based on severity
+                    icon = get_quality_icon(critical, medium, low)
+
+                    # Build compact display with ANSI colors for each severity level
+                    colored_parts = []
+                    if critical > 0:
+                        colored_parts.append(f"\033[31m{critical}\033[0m")  # Standard red for critical
+                    if medium > 0:
+                        colored_parts.append(f"\033[33m{medium}\033[0m")  # Standard yellow for medium
+                    if low > 0:
+                        colored_parts.append(f"\033[37m{low}\033[0m")  # White/light gray for low
+
+                    # Join with middle dot separator
+                    if colored_parts:
+                        grade_str = f"[{icon}:{'·'.join(colored_parts)}]"
                     else:
-                        emoji = '🔴'
+                        grade_str = f"[{icon}]"

-                    # Simple, clear display without confusing scope labels
-                    grade_str = f"[{emoji}:{grade}/{issues}]"
                     quality_valid = True
         except:
             pass
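To make the compact cell concrete, a small sketch with invented counts (2 critical, 7 medium, 31 low), following the color and separator choices above:

```python
# Renders as "[🟠:2·7·31]" with each count wrapped in its ANSI color
icon = get_quality_icon(2, 7, 31)  # "🟠": critical present but below 10
parts = ["\033[31m2\033[0m", "\033[33m7\033[0m", "\033[37m31\033[0m"]
grade_str = f"[{icon}:{'·'.join(parts)}]"
```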
@@ -308,17 +439,31 @@ def get_compact_status():
         if cache_file.exists():
             with open(cache_file, 'r') as f:
                 old_data = json.load(f)
-            if old_data.get('status') == 'success':
-                old_grade = old_data['summary']['quality_grade']
-                old_issues = old_data['summary'].get('total_issues', 0)
-                # Show with dimmed indicator that it's old
-                if old_grade in ['A+', 'A']:
-                    emoji = '🟢'
-                elif old_grade in ['B', 'C']:
-                    emoji = '🟡'
+            if old_data.get('status') == 'non-code':
+                # Non-code project - show documentation indicator
+                grade_str = "[📚:Docs]"
+            elif old_data.get('status') == 'success':
+                # Extract issue counts by severity for icon display
+                file_reports = old_data.get('file_reports', {})
+                critical, medium, low = categorize_issues(file_reports)
+
+                # Get icon based on severity
+                icon = get_quality_icon(critical, medium, low)
+
+                # Build compact display with ANSI colors for each severity level
+                colored_parts = []
+                if critical > 0:
+                    colored_parts.append(f"\033[31m{critical}\033[0m")  # Standard red for critical
+                if medium > 0:
+                    colored_parts.append(f"\033[33m{medium}\033[0m")  # Standard yellow for medium
+                if low > 0:
+                    colored_parts.append(f"\033[37m{low}\033[0m")  # White/light gray for low
+
+                # Join with middle dot separator
+                if colored_parts:
+                    grade_str = f"[{icon}:{'·'.join(colored_parts)}]"
                 else:
-                    emoji = '🔴'
-                    grade_str = f"[{emoji}:{old_grade}/{old_issues}]"
+                    grade_str = f"[{icon}]"
             else:
                 grade_str = "[...]"
         else:
@@ -335,22 +335,27 @@ def extract_metadata_single_pass(file_path: str) -> tuple[Dict[str, Any], str, i
         "concepts": [],
         "ast_elements": [],
         "has_code_blocks": False,
-        "total_messages": 0
+        "total_messages": 0,
+        "project_path": None  # Add project path from cwd
     }
-
+
     first_timestamp = None
     message_count = 0
     all_text = []
-
+
     try:
         with open(file_path, 'r', encoding='utf-8') as f:
             for line in f:
                 if not line.strip():
                     continue
-
+
                 try:
                     data = json.loads(line)
-
+
+                    # Extract cwd (current working directory) as project path
+                    if metadata["project_path"] is None and 'cwd' in data:
+                        metadata["project_path"] = data.get('cwd')
+
                     # Get timestamp from first valid entry
                     if first_timestamp is None and 'timestamp' in data:
                         first_timestamp = data.get('timestamp')
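For reference, a sketch of a transcript line this single pass consumes; the `cwd` and `timestamp` keys are the ones read above, and the values are invented:

```python
import json

line = '{"cwd": "/home/user/projects/atlas", "timestamp": "2025-01-15T10:00:00Z"}'
data = json.loads(line)
# The first line carrying "cwd" populates metadata["project_path"]
assert data.get('cwd') == "/home/user/projects/atlas"
```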
@@ -106,7 +106,7 @@ class SessionQualityTracker:

         return edited_files

-    def analyze_session_quality(self, session_file: Optional[Path] = None) -> Dict[str, Any]:
+    def analyze_session_quality(self, session_file: Optional[Path] = None, use_tracker: bool = False) -> Dict[str, Any]:
         """
         Analyze code quality for all files edited in current session.
         Returns quality report with actionable insights.
@@ -114,6 +114,22 @@ class SessionQualityTracker:
         # Update patterns (uses cache, <100ms)
         check_and_update_patterns()

+        # Check for session edit tracker first (priority mode)
+        if use_tracker or (not session_file):
+            tracker_file = Path.home() / ".claude-self-reflect" / "current_session_edits.json"
+            if tracker_file.exists():
+                try:
+                    with open(tracker_file, 'r') as f:
+                        tracker_data = json.load(f)
+                    edited_files = set(tracker_data.get('edited_files', []))
+                    if edited_files:
+                        logger.info(f"Using session tracker: {len(edited_files)} files edited in session")
+                        self.current_session_id = 'active_session'
+                        # Use Session scope label for tracked edits
+                        return self._analyze_files_with_scope(edited_files, scope_label='Session')
+                except Exception as e:
+                    logger.debug(f"Error reading tracker file: {e}")
+
         # Find active session if not provided
         if not session_file:
             session_file = self.find_active_session()
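A sketch of the tracker file this priority branch reads; the path and `edited_files` field come from the code, while the file list is invented and the component that normally writes the file is not shown in this diff:

```python
import json
from pathlib import Path

tracker = Path.home() / ".claude-self-reflect" / "current_session_edits.json"
tracker.parent.mkdir(parents=True, exist_ok=True)
# Shape expected by analyze_session_quality() above
tracker.write_text(json.dumps({"edited_files": ["scripts/foo.py", "src/bar.ts"]}))
```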
@@ -188,7 +204,7 @@ class SessionQualityTracker:
                 'avg_quality_score': round(avg_quality, 3),
                 'total_issues': total_issues,
                 'total_good_patterns': total_good_patterns,
-                'quality_grade': self._get_quality_grade(avg_quality)
+                'quality_grade': self._get_quality_grade(avg_quality, total_issues)
             },
             'file_reports': file_reports,
             'actionable_items': self._generate_actionable_items(file_reports),
@@ -212,20 +228,92 @@ class SessionQualityTracker:
         top_issues.sort(key=lambda x: x['count'], reverse=True)
         return top_issues[:5]  # Top 5 issues

-    def _get_quality_grade(self, score: float) -> str:
-        """Convert quality score to letter grade."""
-        if score >= 0.9:
-            return 'A+'
-        elif score >= 0.8:
-            return 'A'
-        elif score >= 0.7:
-            return 'B'
-        elif score >= 0.6:
-            return 'C'
-        elif score >= 0.5:
-            return 'D'
-        else:
+    def _get_quality_grade(self, score: float, total_issues: int = 0) -> str:
+        """
+        Convert quality score to letter grade.
+        Based on consensus: issues should dominate grading.
+
+        Grade boundaries (adjusted for issue count):
+        - A+: score >= 0.97 AND issues <= 5
+        - A: score >= 0.93 AND issues <= 20
+        - B: score >= 0.83 AND issues <= 50
+        - C: score >= 0.73 AND issues <= 100
+        - D: score >= 0.60
+        - F: score < 0.60
+        """
+        # Hard caps based on issue count (industry standard)
+        if total_issues > 200:
             return 'F'
+        elif total_issues > 100:
+            # Many issues - max grade is C+
+            if score >= 0.77:
+                return 'C+'
+            elif score >= 0.73:
+                return 'C'
+            elif score >= 0.70:
+                return 'C-'
+            elif score >= 0.60:
+                return 'D'
+            else:
+                return 'F'
+        elif total_issues > 50:
+            # Moderate issues - max grade is B+
+            if score >= 0.87:
+                return 'B+'
+            elif score >= 0.83:
+                return 'B'
+            elif score >= 0.80:
+                return 'B-'
+            elif score >= 0.73:
+                return 'C'
+            elif score >= 0.60:
+                return 'D'
+            else:
+                return 'F'
+        elif total_issues > 20:
+            # Some issues - max grade is A-
+            if score >= 0.90:
+                return 'A-'
+            elif score >= 0.87:
+                return 'B+'
+            elif score >= 0.83:
+                return 'B'
+            elif score >= 0.73:
+                return 'C'
+            elif score >= 0.60:
+                return 'D'
+            else:
+                return 'F'
+        elif total_issues > 5:
+            # Few issues - max grade is A
+            if score >= 0.93:
+                return 'A'
+            elif score >= 0.90:
+                return 'A-'
+            elif score >= 0.83:
+                return 'B'
+            elif score >= 0.73:
+                return 'C'
+            elif score >= 0.60:
+                return 'D'
+            else:
+                return 'F'
+        else:
+            # Very few issues (0-5) - can achieve A+
+            if score >= 0.97:
+                return 'A+'
+            elif score >= 0.93:
+                return 'A'
+            elif score >= 0.90:
+                return 'A-'
+            elif score >= 0.83:
+                return 'B'
+            elif score >= 0.73:
+                return 'C'
+            elif score >= 0.60:
+                return 'D'
+            else:
+                return 'F'

     def _generate_actionable_items(self, file_reports: Dict) -> List[str]:
         """Generate actionable recommendations for the user."""
@@ -263,23 +351,101 @@ class SessionQualityTracker:

         return actions

+    def _analyze_files_with_scope(self, edited_files: set, scope_label: str = 'Session') -> Dict[str, Any]:
+        """
+        Analyze specific files with a given scope label.
+        Used for both session tracking and fallback modes.
+        """
+        # Analyze each edited file
+        file_reports = {}
+        total_issues = 0
+        total_good_patterns = 0
+        quality_scores = []
+
+        for file_path in edited_files:
+            # Only analyze code files
+            if any(str(file_path).endswith(ext) for ext in ['.py', '.ts', '.js', '.tsx', '.jsx']):
+                try:
+                    result = self.analyzer.analyze_file(file_path)
+                    metrics = result['quality_metrics']
+
+                    file_reports[file_path] = {
+                        'quality_score': metrics['quality_score'],
+                        'good_patterns': metrics['good_patterns_found'],
+                        'issues': metrics['total_issues'],
+                        'recommendations': result.get('recommendations', [])[:3],  # Top 3
+                        'top_issues': self._get_top_issues(result)
+                    }
+
+                    total_issues += metrics['total_issues']
+                    total_good_patterns += metrics['good_patterns_found']
+                    quality_scores.append(metrics['quality_score'])
+
+                    # Track quality history
+                    if file_path not in self.quality_history:
+                        self.quality_history[file_path] = []
+                    self.quality_history[file_path].append({
+                        'timestamp': datetime.now().isoformat(),
+                        'score': metrics['quality_score']
+                    })
+
+                except Exception as e:
+                    logger.error(f"Failed to analyze {file_path}: {e}")
+
+        if not file_reports:
+            return {
+                'status': 'no_code_files',
+                'session_id': self.current_session_id,
+                'scope_label': scope_label,
+                'message': 'No analyzable code files in session'
+            }
+
+        # Calculate session average
+        avg_quality = sum(quality_scores) / len(quality_scores) if quality_scores else 0
+
+        # Generate session report
+        return {
+            'status': 'success',
+            'session_id': self.current_session_id,
+            'scope_label': scope_label,  # Use provided scope label
+            'timestamp': datetime.now().isoformat(),
+            'summary': {
+                'files_analyzed': len(file_reports),
+                'avg_quality_score': round(avg_quality, 3),
+                'total_issues': total_issues,
+                'total_good_patterns': total_good_patterns,
+                'quality_grade': self._get_quality_grade(avg_quality, total_issues)
+            },
+            'file_reports': file_reports,
+            'actionable_items': self._generate_actionable_items(file_reports),
+            'quality_trend': self._calculate_quality_trend()
+        }
+
     def analyze_recent_files(self) -> Dict[str, Any]:
         """Analyze core project files when no session is found."""
-        project_root = Path(__file__).parent.parent
-
-        # Define core project files to analyze (not test files)
-        core_files = [
-            "scripts/session_quality_tracker.py",
-            "scripts/cc-statusline-unified.py",
-            "scripts/pattern_registry_enhanced.py",
-            "scripts/simplified_metadata_extractor.py",
-            "scripts/streaming-watcher.py",
-            "scripts/quality-report.py",
-            "mcp-server/src/server.py",
-            "mcp-server/src/search_tools.py",
-            "mcp-server/src/temporal_tools.py",
-            "mcp-server/src/reflection_tools.py",
-        ]
+        # Use current working directory as project root
+        project_root = Path.cwd()
+
+        # Find code files in the project dynamically
+        code_extensions = {'.py', '.js', '.ts', '.jsx', '.tsx', '.java', '.cpp', '.c',
+                           '.h', '.hpp', '.rs', '.go', '.rb', '.php'}
+
+        core_files = []
+        # Look for code files in the project (limit to avoid too many files)
+        for ext in code_extensions:
+            files = list(project_root.rglob(f'*{ext}'))
+            # Filter out common non-source directories
+            files = [f for f in files if not any(
+                skip in f.parts for skip in ['venv', '.venv', 'node_modules', '.git',
+                                             '__pycache__', '.pytest_cache', 'dist',
+                                             'build', 'target', '.idea', '.vscode']
+            )]
+            core_files.extend(files[:20])  # Take up to 20 files per extension
+            if len(core_files) >= 50:  # Increased limit to 50 files for better coverage
+                break
+
+        # Convert to relative paths
+        core_files = [str(f.relative_to(project_root)) for f in core_files[:50]]

         edited_files = set()
         for file_path in core_files:
@@ -289,10 +455,7 @@ class SessionQualityTracker:

         # Also check for recently modified files (last 30 minutes) to catch actual work
         try:
-            # Validate project_root is within expected bounds
-            if not str(project_root.resolve()).startswith(str(Path(__file__).parent.parent.resolve())):
-                logger.error("Security: Invalid project root path")
-                return {}
+            # No need to validate project_root - we can analyze any project

             # Use pathlib instead of subprocess for safer file discovery
             scripts_dir = project_root / "scripts"
@@ -321,7 +484,7 @@ class SessionQualityTracker:
         total_good_patterns = 0
         quality_scores = []

-        for file_path in list(edited_files)[:10]:  # Limit to 10 files for performance
+        for file_path in list(edited_files)[:50]:  # Analyze up to 50 files for better coverage
             try:
                 result = self.analyzer.analyze_file(file_path)
                 metrics = result['quality_metrics']
@@ -367,7 +530,7 @@ class SessionQualityTracker:
                 'avg_quality_score': round(avg_quality, 3),
                 'total_issues': total_issues,
                 'total_good_patterns': total_good_patterns,
-                'quality_grade': self._get_quality_grade(avg_quality)
+                'quality_grade': self._get_quality_grade(avg_quality, total_issues)
             },
             'file_reports': file_reports,
             'actionable_items': self._generate_actionable_items(file_reports),
@@ -441,14 +604,14 @@ class SessionQualityTracker:
         return '\n'.join(report)


-def main():
+def main(use_tracker=False):
     """Run session quality analysis."""
     tracker = SessionQualityTracker()

     logger.info("🔍 Analyzing current session code quality...")
     logger.info("")

-    analysis = tracker.analyze_session_quality()
+    analysis = tracker.analyze_session_quality(use_tracker=use_tracker)
     report = tracker.generate_report(analysis)

     logger.info(report)
@@ -456,8 +619,8 @@ def main():
     # Save report for watcher integration - PER PROJECT
     # Always save cache, even with fallback analysis
     if analysis.get('status') in ['success', 'fallback']:
-        # Get project name from current directory
-        project_name = os.path.basename(os.getcwd())
+        # Get project name from environment or current directory
+        project_name = os.environ.get('QUALITY_PROJECT_NAME', os.path.basename(os.getcwd()))
         # Secure sanitization with whitelist approach
         import re
         safe_project_name = re.sub(r'[^a-zA-Z0-9_-]', '_', project_name)[:100]
@@ -478,4 +641,21 @@ def main():


 if __name__ == "__main__":
-    main()
+    import argparse
+    parser = argparse.ArgumentParser(description='Analyze code quality for projects')
+    parser.add_argument('--project-path', help='Path to the project to analyze')
+    parser.add_argument('--project-name', help='Name of the project for cache file')
+    parser.add_argument('--use-tracker', action='store_true',
+                        help='Use session edit tracker for analysis')
+    args = parser.parse_args()
+
+    # If external project specified, change to that directory
+    if args.project_path:
+        os.chdir(args.project_path)
+
+    # Override project name if specified
+    if args.project_name:
+        # This will be used in the main() function for cache naming
+        os.environ['QUALITY_PROJECT_NAME'] = args.project_name
+
+    main(use_tracker=args.use_tracker)
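Finally, a hedged invocation of the new CLI; the flags come from the argparse block above, while the script path (taken from the core-files list earlier in this diff) and the project values are illustrative:

```python
import subprocess

subprocess.run([
    "python", "scripts/session_quality_tracker.py",  # assumed script location
    "--project-path", "/home/user/projects/atlas",   # hypothetical external project
    "--project-name", "atlas",
    "--use-tracker",
])
```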