empathy-framework 4.6.6-py3-none-any.whl → 4.7.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- empathy_framework-4.7.1.dist-info/METADATA +690 -0
- empathy_framework-4.7.1.dist-info/RECORD +379 -0
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.1.dist-info}/top_level.txt +1 -2
- empathy_healthcare_plugin/monitors/monitoring/__init__.py +9 -9
- empathy_llm_toolkit/agent_factory/__init__.py +6 -6
- empathy_llm_toolkit/agent_factory/adapters/wizard_adapter.py +7 -10
- empathy_llm_toolkit/agents_md/__init__.py +22 -0
- empathy_llm_toolkit/agents_md/loader.py +218 -0
- empathy_llm_toolkit/agents_md/parser.py +271 -0
- empathy_llm_toolkit/agents_md/registry.py +307 -0
- empathy_llm_toolkit/commands/__init__.py +51 -0
- empathy_llm_toolkit/commands/context.py +375 -0
- empathy_llm_toolkit/commands/loader.py +301 -0
- empathy_llm_toolkit/commands/models.py +231 -0
- empathy_llm_toolkit/commands/parser.py +371 -0
- empathy_llm_toolkit/commands/registry.py +429 -0
- empathy_llm_toolkit/config/__init__.py +8 -8
- empathy_llm_toolkit/config/unified.py +3 -7
- empathy_llm_toolkit/context/__init__.py +22 -0
- empathy_llm_toolkit/context/compaction.py +455 -0
- empathy_llm_toolkit/context/manager.py +434 -0
- empathy_llm_toolkit/hooks/__init__.py +24 -0
- empathy_llm_toolkit/hooks/config.py +306 -0
- empathy_llm_toolkit/hooks/executor.py +289 -0
- empathy_llm_toolkit/hooks/registry.py +302 -0
- empathy_llm_toolkit/hooks/scripts/__init__.py +39 -0
- empathy_llm_toolkit/hooks/scripts/evaluate_session.py +201 -0
- empathy_llm_toolkit/hooks/scripts/first_time_init.py +285 -0
- empathy_llm_toolkit/hooks/scripts/pre_compact.py +207 -0
- empathy_llm_toolkit/hooks/scripts/session_end.py +183 -0
- empathy_llm_toolkit/hooks/scripts/session_start.py +163 -0
- empathy_llm_toolkit/hooks/scripts/suggest_compact.py +225 -0
- empathy_llm_toolkit/learning/__init__.py +30 -0
- empathy_llm_toolkit/learning/evaluator.py +438 -0
- empathy_llm_toolkit/learning/extractor.py +514 -0
- empathy_llm_toolkit/learning/storage.py +560 -0
- empathy_llm_toolkit/providers.py +4 -11
- empathy_llm_toolkit/security/__init__.py +17 -17
- empathy_llm_toolkit/utils/tokens.py +2 -5
- empathy_os/__init__.py +202 -70
- empathy_os/cache_monitor.py +5 -3
- empathy_os/cli/__init__.py +11 -55
- empathy_os/cli/__main__.py +29 -15
- empathy_os/cli/commands/inspection.py +21 -12
- empathy_os/cli/commands/memory.py +4 -12
- empathy_os/cli/commands/profiling.py +198 -0
- empathy_os/cli/commands/utilities.py +27 -7
- empathy_os/cli.py +28 -57
- empathy_os/cli_unified.py +525 -1164
- empathy_os/cost_tracker.py +9 -3
- empathy_os/dashboard/server.py +200 -2
- empathy_os/hot_reload/__init__.py +7 -7
- empathy_os/hot_reload/config.py +6 -7
- empathy_os/hot_reload/integration.py +35 -35
- empathy_os/hot_reload/reloader.py +57 -57
- empathy_os/hot_reload/watcher.py +28 -28
- empathy_os/hot_reload/websocket.py +2 -2
- empathy_os/memory/__init__.py +11 -4
- empathy_os/memory/claude_memory.py +1 -1
- empathy_os/memory/cross_session.py +8 -12
- empathy_os/memory/edges.py +6 -6
- empathy_os/memory/file_session.py +770 -0
- empathy_os/memory/graph.py +30 -30
- empathy_os/memory/nodes.py +6 -6
- empathy_os/memory/short_term.py +15 -9
- empathy_os/memory/unified.py +606 -140
- empathy_os/meta_workflows/agent_creator.py +3 -9
- empathy_os/meta_workflows/cli_meta_workflows.py +113 -53
- empathy_os/meta_workflows/form_engine.py +6 -18
- empathy_os/meta_workflows/intent_detector.py +64 -24
- empathy_os/meta_workflows/models.py +3 -1
- empathy_os/meta_workflows/pattern_learner.py +13 -31
- empathy_os/meta_workflows/plan_generator.py +55 -47
- empathy_os/meta_workflows/session_context.py +2 -3
- empathy_os/meta_workflows/workflow.py +20 -51
- empathy_os/models/cli.py +2 -2
- empathy_os/models/tasks.py +1 -2
- empathy_os/models/telemetry.py +4 -1
- empathy_os/models/token_estimator.py +3 -1
- empathy_os/monitoring/alerts.py +938 -9
- empathy_os/monitoring/alerts_cli.py +346 -183
- empathy_os/orchestration/execution_strategies.py +12 -29
- empathy_os/orchestration/pattern_learner.py +20 -26
- empathy_os/orchestration/real_tools.py +6 -15
- empathy_os/platform_utils.py +2 -1
- empathy_os/plugins/__init__.py +2 -2
- empathy_os/plugins/base.py +64 -64
- empathy_os/plugins/registry.py +32 -32
- empathy_os/project_index/index.py +49 -15
- empathy_os/project_index/models.py +1 -2
- empathy_os/project_index/reports.py +1 -1
- empathy_os/project_index/scanner.py +1 -0
- empathy_os/redis_memory.py +10 -7
- empathy_os/resilience/__init__.py +1 -1
- empathy_os/resilience/health.py +10 -10
- empathy_os/routing/__init__.py +7 -7
- empathy_os/routing/chain_executor.py +37 -37
- empathy_os/routing/classifier.py +36 -36
- empathy_os/routing/smart_router.py +40 -40
- empathy_os/routing/{wizard_registry.py → workflow_registry.py} +47 -47
- empathy_os/scaffolding/__init__.py +8 -8
- empathy_os/scaffolding/__main__.py +1 -1
- empathy_os/scaffolding/cli.py +28 -28
- empathy_os/socratic/__init__.py +3 -19
- empathy_os/socratic/ab_testing.py +25 -36
- empathy_os/socratic/blueprint.py +38 -38
- empathy_os/socratic/cli.py +34 -20
- empathy_os/socratic/collaboration.py +30 -28
- empathy_os/socratic/domain_templates.py +9 -1
- empathy_os/socratic/embeddings.py +17 -13
- empathy_os/socratic/engine.py +135 -70
- empathy_os/socratic/explainer.py +70 -60
- empathy_os/socratic/feedback.py +24 -19
- empathy_os/socratic/forms.py +15 -10
- empathy_os/socratic/generator.py +51 -35
- empathy_os/socratic/llm_analyzer.py +25 -23
- empathy_os/socratic/mcp_server.py +99 -159
- empathy_os/socratic/session.py +19 -13
- empathy_os/socratic/storage.py +98 -67
- empathy_os/socratic/success.py +38 -27
- empathy_os/socratic/visual_editor.py +51 -39
- empathy_os/socratic/web_ui.py +99 -66
- empathy_os/telemetry/cli.py +3 -1
- empathy_os/telemetry/usage_tracker.py +1 -3
- empathy_os/test_generator/__init__.py +3 -3
- empathy_os/test_generator/cli.py +28 -28
- empathy_os/test_generator/generator.py +64 -66
- empathy_os/test_generator/risk_analyzer.py +11 -11
- empathy_os/vscode_bridge 2.py +173 -0
- empathy_os/vscode_bridge.py +173 -0
- empathy_os/workflows/__init__.py +212 -120
- empathy_os/workflows/batch_processing.py +8 -24
- empathy_os/workflows/bug_predict.py +1 -1
- empathy_os/workflows/code_review.py +20 -5
- empathy_os/workflows/code_review_pipeline.py +13 -8
- empathy_os/workflows/keyboard_shortcuts/workflow.py +6 -2
- empathy_os/workflows/manage_documentation.py +1 -0
- empathy_os/workflows/orchestrated_health_check.py +6 -11
- empathy_os/workflows/orchestrated_release_prep.py +3 -3
- empathy_os/workflows/pr_review.py +18 -10
- empathy_os/workflows/progressive/README 2.md +454 -0
- empathy_os/workflows/progressive/__init__ 2.py +92 -0
- empathy_os/workflows/progressive/__init__.py +2 -12
- empathy_os/workflows/progressive/cli 2.py +242 -0
- empathy_os/workflows/progressive/cli.py +14 -37
- empathy_os/workflows/progressive/core 2.py +488 -0
- empathy_os/workflows/progressive/core.py +12 -12
- empathy_os/workflows/progressive/orchestrator 2.py +701 -0
- empathy_os/workflows/progressive/orchestrator.py +166 -144
- empathy_os/workflows/progressive/reports 2.py +528 -0
- empathy_os/workflows/progressive/reports.py +22 -31
- empathy_os/workflows/progressive/telemetry 2.py +280 -0
- empathy_os/workflows/progressive/telemetry.py +8 -14
- empathy_os/workflows/progressive/test_gen 2.py +514 -0
- empathy_os/workflows/progressive/test_gen.py +29 -48
- empathy_os/workflows/progressive/workflow 2.py +628 -0
- empathy_os/workflows/progressive/workflow.py +31 -70
- empathy_os/workflows/release_prep.py +21 -6
- empathy_os/workflows/release_prep_crew.py +1 -0
- empathy_os/workflows/secure_release.py +13 -6
- empathy_os/workflows/security_audit.py +8 -3
- empathy_os/workflows/test_coverage_boost_crew.py +3 -2
- empathy_os/workflows/test_maintenance_crew.py +1 -0
- empathy_os/workflows/test_runner.py +16 -12
- empathy_software_plugin/SOFTWARE_PLUGIN_README.md +25 -703
- empathy_software_plugin/cli.py +0 -122
- patterns/README.md +119 -0
- patterns/__init__.py +95 -0
- patterns/behavior.py +298 -0
- patterns/code_review_memory.json +441 -0
- patterns/core.py +97 -0
- patterns/debugging.json +3763 -0
- patterns/empathy.py +268 -0
- patterns/health_check_memory.json +505 -0
- patterns/input.py +161 -0
- patterns/memory_graph.json +8 -0
- patterns/refactoring_memory.json +1113 -0
- patterns/registry.py +663 -0
- patterns/security_memory.json +8 -0
- patterns/structural.py +415 -0
- patterns/validation.py +194 -0
- coach_wizards/__init__.py +0 -45
- coach_wizards/accessibility_wizard.py +0 -91
- coach_wizards/api_wizard.py +0 -91
- coach_wizards/base_wizard.py +0 -209
- coach_wizards/cicd_wizard.py +0 -91
- coach_wizards/code_reviewer_README.md +0 -60
- coach_wizards/code_reviewer_wizard.py +0 -180
- coach_wizards/compliance_wizard.py +0 -91
- coach_wizards/database_wizard.py +0 -91
- coach_wizards/debugging_wizard.py +0 -91
- coach_wizards/documentation_wizard.py +0 -91
- coach_wizards/generate_wizards.py +0 -347
- coach_wizards/localization_wizard.py +0 -173
- coach_wizards/migration_wizard.py +0 -91
- coach_wizards/monitoring_wizard.py +0 -91
- coach_wizards/observability_wizard.py +0 -91
- coach_wizards/performance_wizard.py +0 -91
- coach_wizards/prompt_engineering_wizard.py +0 -661
- coach_wizards/refactoring_wizard.py +0 -91
- coach_wizards/scaling_wizard.py +0 -90
- coach_wizards/security_wizard.py +0 -92
- coach_wizards/testing_wizard.py +0 -91
- empathy_framework-4.6.6.dist-info/METADATA +0 -1597
- empathy_framework-4.6.6.dist-info/RECORD +0 -410
- empathy_llm_toolkit/wizards/__init__.py +0 -43
- empathy_llm_toolkit/wizards/base_wizard.py +0 -364
- empathy_llm_toolkit/wizards/customer_support_wizard.py +0 -190
- empathy_llm_toolkit/wizards/healthcare_wizard.py +0 -378
- empathy_llm_toolkit/wizards/patient_assessment_README.md +0 -64
- empathy_llm_toolkit/wizards/patient_assessment_wizard.py +0 -193
- empathy_llm_toolkit/wizards/technology_wizard.py +0 -209
- empathy_os/wizard_factory_cli.py +0 -170
- empathy_software_plugin/wizards/__init__.py +0 -42
- empathy_software_plugin/wizards/advanced_debugging_wizard.py +0 -395
- empathy_software_plugin/wizards/agent_orchestration_wizard.py +0 -511
- empathy_software_plugin/wizards/ai_collaboration_wizard.py +0 -503
- empathy_software_plugin/wizards/ai_context_wizard.py +0 -441
- empathy_software_plugin/wizards/ai_documentation_wizard.py +0 -503
- empathy_software_plugin/wizards/base_wizard.py +0 -288
- empathy_software_plugin/wizards/book_chapter_wizard.py +0 -519
- empathy_software_plugin/wizards/code_review_wizard.py +0 -604
- empathy_software_plugin/wizards/debugging/__init__.py +0 -50
- empathy_software_plugin/wizards/debugging/bug_risk_analyzer.py +0 -414
- empathy_software_plugin/wizards/debugging/config_loaders.py +0 -446
- empathy_software_plugin/wizards/debugging/fix_applier.py +0 -469
- empathy_software_plugin/wizards/debugging/language_patterns.py +0 -385
- empathy_software_plugin/wizards/debugging/linter_parsers.py +0 -470
- empathy_software_plugin/wizards/debugging/verification.py +0 -369
- empathy_software_plugin/wizards/enhanced_testing_wizard.py +0 -537
- empathy_software_plugin/wizards/memory_enhanced_debugging_wizard.py +0 -816
- empathy_software_plugin/wizards/multi_model_wizard.py +0 -501
- empathy_software_plugin/wizards/pattern_extraction_wizard.py +0 -422
- empathy_software_plugin/wizards/pattern_retriever_wizard.py +0 -400
- empathy_software_plugin/wizards/performance/__init__.py +0 -9
- empathy_software_plugin/wizards/performance/bottleneck_detector.py +0 -221
- empathy_software_plugin/wizards/performance/profiler_parsers.py +0 -278
- empathy_software_plugin/wizards/performance/trajectory_analyzer.py +0 -429
- empathy_software_plugin/wizards/performance_profiling_wizard.py +0 -305
- empathy_software_plugin/wizards/prompt_engineering_wizard.py +0 -425
- empathy_software_plugin/wizards/rag_pattern_wizard.py +0 -461
- empathy_software_plugin/wizards/security/__init__.py +0 -32
- empathy_software_plugin/wizards/security/exploit_analyzer.py +0 -290
- empathy_software_plugin/wizards/security/owasp_patterns.py +0 -241
- empathy_software_plugin/wizards/security/vulnerability_scanner.py +0 -604
- empathy_software_plugin/wizards/security_analysis_wizard.py +0 -322
- empathy_software_plugin/wizards/security_learning_wizard.py +0 -740
- empathy_software_plugin/wizards/tech_debt_wizard.py +0 -726
- empathy_software_plugin/wizards/testing/__init__.py +0 -27
- empathy_software_plugin/wizards/testing/coverage_analyzer.py +0 -459
- empathy_software_plugin/wizards/testing/quality_analyzer.py +0 -525
- empathy_software_plugin/wizards/testing/test_suggester.py +0 -533
- empathy_software_plugin/wizards/testing_wizard.py +0 -274
- wizards/__init__.py +0 -82
- wizards/admission_assessment_wizard.py +0 -644
- wizards/care_plan.py +0 -321
- wizards/clinical_assessment.py +0 -769
- wizards/discharge_planning.py +0 -77
- wizards/discharge_summary_wizard.py +0 -468
- wizards/dosage_calculation.py +0 -497
- wizards/incident_report_wizard.py +0 -454
- wizards/medication_reconciliation.py +0 -85
- wizards/nursing_assessment.py +0 -171
- wizards/patient_education.py +0 -654
- wizards/quality_improvement.py +0 -705
- wizards/sbar_report.py +0 -324
- wizards/sbar_wizard.py +0 -608
- wizards/shift_handoff_wizard.py +0 -535
- wizards/soap_note_wizard.py +0 -679
- wizards/treatment_plan.py +0 -15
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.1.dist-info}/WHEEL +0 -0
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.1.dist-info}/entry_points.txt +0 -0
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.1.dist-info}/licenses/LICENSE +0 -0
@@ -1,604 +0,0 @@
-"""Pattern-Based Code Review Wizard
-
-Level 4 wizard that reviews code against historical bug patterns.
-Uses resolved bugs to generate detection rules, then scans
-new/changed code for similar anti-patterns.
-
-Usage:
-    wizard = CodeReviewWizard()
-    result = await wizard.analyze({
-        "files": ["src/api.py", "src/utils.py"],
-        "staged_only": True,
-    })
-
-CLI:
-    empathy review src/api.py
-    empathy review --staged
-
-Copyright 2025 Smart AI Memory, LLC
-Licensed under Fair Source 0.9
-"""
-
-import json
-import logging
-import re
-import subprocess
-from dataclasses import dataclass, field
-from datetime import datetime
-from pathlib import Path
-from typing import Any
-
-from .base_wizard import BaseWizard
-
-logger = logging.getLogger(__name__)
-
-
-@dataclass
-class ReviewFinding:
-    """A potential issue found during code review."""
-
-    file: str
-    line: int
-    pattern_type: str
-    pattern_id: str
-    description: str
-    historical_cause: str
-    suggestion: str
-    code_snippet: str
-    confidence: float
-    severity: str = "warning"
-
-    def to_dict(self) -> dict:
-        return {
-            "file": self.file,
-            "line": self.line,
-            "pattern_type": self.pattern_type,
-            "pattern_id": self.pattern_id,
-            "description": self.description,
-            "historical_cause": self.historical_cause,
-            "suggestion": self.suggestion,
-            "code_snippet": self.code_snippet,
-            "confidence": self.confidence,
-            "severity": self.severity,
-        }
-
-
-@dataclass
-class AntiPatternRule:
-    """Detection rule generated from historical bugs."""
-
-    pattern_type: str
-    description: str
-    detect_patterns: list[str] = field(default_factory=list)
-    safe_patterns: list[str] = field(default_factory=list)
-    fix_suggestion: str = ""
-    reference_bugs: list[str] = field(default_factory=list)
-    severity: str = "warning"
-
-
-class CodeReviewWizard(BaseWizard):
-    """Reviews code against historical bug patterns.
-
-    This is the capstone wizard that brings together pattern learning
-    to prevent bugs before they occur.
-    """
-
-    @property
-    def name(self) -> str:
-        return "CodeReviewWizard"
-
-    @property
-    def level(self) -> int:
-        return 4  # Anticipatory
-
-    def __init__(self, patterns_dir: str = "./patterns", **kwargs):
-        super().__init__(**kwargs)
-        self.patterns_dir = Path(patterns_dir)
-        self._rules: list[AntiPatternRule] = []
-        self._bugs_loaded = False
-
-        # Built-in anti-pattern rules (extended by loaded bugs)
-        self._builtin_rules = {
-            "null_reference": AntiPatternRule(
-                pattern_type="null_reference",
-                description="Potential null/undefined reference",
-                detect_patterns=[
-                    r"\.map\s*\(",  # Array method that fails on null
-                    r"\.forEach\s*\(",
-                    r"\.filter\s*\(",
-                    r"\.reduce\s*\(",
-                    r"\[\s*\d+\s*\]",  # Direct index access
-                    r"\.length\b",  # Length property on potentially null
-                    r"for\s+\w+\s+in\s+",  # Iteration over potentially null
-                ],
-                safe_patterns=[
-                    r"\?\.",  # Optional chaining
-                    r"\?\?\s*\[",  # Nullish coalescing with array
-                    r"if\s*\(\s*\w+",  # Preceded by if check
-                    r"&&\s*\w+\.",  # Short-circuit check
-                    r"\.get\s*\(",  # Python safe get
-                    r"or\s*\[\]",  # Python or fallback
-                ],
-                fix_suggestion="Add null check: data?.items ?? [] or if (data) {...}",
-                severity="warning",
-            ),
-            "async_timing": AntiPatternRule(
-                pattern_type="async_timing",
-                description="Potential missing await",
-                detect_patterns=[
-                    r"async\s+\w+\s*\([^)]*\)\s*[^{]*\{[^}]*\w+\s*\(",  # Async fn with call
-                    r"Promise\.\w+\s*\(",  # Promise methods
-                    r"\.then\s*\([^)]*\)\s*$",  # Dangling then
-                ],
-                safe_patterns=[
-                    r"\bawait\s+",  # Has await
-                    r"return\s+\w+\s*\(",  # Returned promise
-                    r"\.then\s*\([^)]*\)\s*\.\s*catch",  # Has catch
-                ],
-                fix_suggestion="Add await keyword or handle promise with .then().catch()",
-                severity="warning",
-            ),
-            "error_handling": AntiPatternRule(
-                pattern_type="error_handling",
-                description="Missing error handling",
-                detect_patterns=[
-                    r"fetch\s*\(",  # Fetch without error handling
-                    r"axios\.\w+\s*\(",  # Axios calls
-                    r"requests\.\w+\s*\(",  # Python requests
-                    r"\.json\s*\(",  # JSON parsing
-                    r"JSON\.parse\s*\(",  # JSON.parse
-                    r"open\s*\([^)]+\)",  # File operations
-                ],
-                safe_patterns=[
-                    r"try\s*[:\{]",  # In try block
-                    r"\.catch\s*\(",  # Has catch
-                    r"except\s+",  # Python except
-                    r"with\s+open",  # Python context manager
-                ],
-                fix_suggestion="Wrap in try/catch or add .catch() handler",
-                severity="info",
-            ),
-        }
-
-    async def analyze(self, context: dict[str, Any]) -> dict[str, Any]:
-        """Analyze files for anti-patterns based on historical bugs.
-
-        Args:
-            context: {
-                "files": list[str] - Files to review
-                "staged_only": bool - Only review staged changes
-                "diff": str - Direct diff content (optional)
-                "severity_threshold": str - Minimum severity (info/warning/error)
-            }
-
-        Returns:
-            {
-                "findings": list of ReviewFinding dicts,
-                "summary": summary stats,
-                "predictions": Level 4 predictions,
-                "recommendations": actionable steps,
-                "confidence": overall confidence
-            }
-
-        """
-        # Load historical patterns if not already loaded
-        if not self._bugs_loaded:
-            self._load_historical_bugs()
-            self._bugs_loaded = True
-
-        files = context.get("files", [])
-        staged_only = context.get("staged_only", False)
-        diff = context.get("diff")
-        severity_threshold = context.get("severity_threshold", "info")
-
-        # Get files to review
-        if staged_only:
-            files = self._get_staged_files()
-        elif not files and not diff:
-            # Default to recently changed files
-            files = self._get_recent_changed_files()
-
-        # Perform review
-        findings = []
-
-        if diff:
-            findings.extend(self._review_diff(diff))
-        else:
-            for file_path in files:
-                findings.extend(self._review_file(file_path))
-
-        # Filter by severity
-        severity_order = {"info": 0, "warning": 1, "error": 2}
-        threshold = severity_order.get(severity_threshold, 0)
-        findings = [f for f in findings if severity_order.get(f.severity, 0) >= threshold]
-
-        # Generate predictions and recommendations
-        predictions = self._generate_predictions(findings)
-        recommendations = self._generate_recommendations(findings)
-
-        return {
-            "findings": [f.to_dict() for f in findings],
-            "summary": {
-                "total_findings": len(findings),
-                "by_severity": self._count_by_severity(findings),
-                "by_type": self._count_by_type(findings),
-                "files_reviewed": len(files) if files else 1,
-            },
-            "predictions": predictions,
-            "recommendations": recommendations,
-            "confidence": self._calculate_confidence(findings),
-            "metadata": {
-                "wizard": self.name,
-                "level": self.level,
-                "timestamp": datetime.now().isoformat(),
-                "rules_loaded": len(self._rules) + len(self._builtin_rules),
-            },
-        }
-
-    def _load_historical_bugs(self) -> None:
-        """Load resolved bugs and generate detection rules."""
-        for debug_dir in ["debugging", "debugging_demo", "repo_test/debugging"]:
-            dir_path = self.patterns_dir / debug_dir
-            if not dir_path.exists():
-                continue
-
-            for json_file in dir_path.glob("bug_*.json"):
-                try:
-                    with open(json_file, encoding="utf-8") as f:
-                        bug = json.load(f)
-
-                    # Only use resolved bugs with fixes
-                    if bug.get("status") != "resolved":
-                        continue
-                    if not bug.get("fix_applied"):
-                        continue
-
-                    # Create rule from bug
-                    rule = self._bug_to_rule(bug)
-                    if rule:
-                        self._rules.append(rule)
-
-                except (json.JSONDecodeError, OSError):
-                    continue
-
-    def _bug_to_rule(self, bug: dict) -> AntiPatternRule | None:
-        """Convert a resolved bug to a detection rule."""
-        error_type = bug.get("error_type", "unknown")
-        fix_code = bug.get("fix_code", "")
-
-        # Start with builtin rule for this type if exists
-        base_rule = self._builtin_rules.get(error_type)
-        if not base_rule:
-            return None
-
-        # Extend with bug-specific info
-        return AntiPatternRule(
-            pattern_type=error_type,
-            description=f"{base_rule.description} (historical: {bug.get('bug_id', 'unknown')})",
-            detect_patterns=base_rule.detect_patterns,
-            safe_patterns=base_rule.safe_patterns + self._extract_safe_patterns(fix_code),
-            fix_suggestion=bug.get("fix_applied", base_rule.fix_suggestion),
-            reference_bugs=[bug.get("bug_id", "unknown")],
-            severity=base_rule.severity,
-        )
-
-    def _extract_safe_patterns(self, fix_code: str) -> list[str]:
-        """Extract regex patterns from fix code that indicate safety."""
-        patterns: list[str] = []
-        if not fix_code:
-            return patterns
-
-        # Common safe patterns to detect
-        if "?." in fix_code:
-            patterns.append(r"\?\.")
-        if "??" in fix_code:
-            patterns.append(r"\?\?")
-        if "await" in fix_code.lower():
-            patterns.append(r"\bawait\s+")
-        if ".get(" in fix_code:
-            patterns.append(r"\.get\s*\(")
-        if "try" in fix_code.lower():
-            patterns.append(r"\btry\s*[:\{]")
-
-        return patterns
-
-    def _review_file(self, file_path: str) -> list[ReviewFinding]:
-        """Review a single file for anti-patterns."""
-        findings: list[ReviewFinding] = []
-
-        try:
-            path = Path(file_path)
-            if not path.exists():
-                return findings
-
-            content = path.read_text(encoding="utf-8", errors="ignore")
-        except (OSError, UnicodeDecodeError):
-            return findings
-
-        lines = content.split("\n")
-
-        for line_num, line in enumerate(lines, 1):
-            # Skip comments and empty lines
-            stripped = line.strip()
-            if not stripped or stripped.startswith(("#", "//", "/*", "*")):
-                continue
-
-            # Check all rules
-            for rule in list(self._builtin_rules.values()) + self._rules:
-                finding = self._check_line_against_rule(file_path, line_num, line, rule, lines)
-                if finding:
-                    findings.append(finding)
-
-        return findings
-
-    def _review_diff(self, diff: str) -> list[ReviewFinding]:
-        """Review a git diff for anti-patterns."""
-        findings = []
-        current_file = ""
-        line_num = 0
-
-        for line in diff.split("\n"):
-            # Track current file
-            if line.startswith("diff --git"):
-                match = re.search(r"b/(.+)$", line)
-                current_file = match.group(1) if match else ""
-                continue
-
-            # Track line numbers
-            if line.startswith("@@"):
-                match = re.search(r"\+(\d+)", line)
-                line_num = int(match.group(1)) if match else 0
-                continue
-
-            # Check added lines
-            if line.startswith("+") and not line.startswith("+++"):
-                code_line = line[1:]  # Remove + prefix
-                for rule in list(self._builtin_rules.values()) + self._rules:
-                    finding = self._check_line_against_rule(
-                        current_file,
-                        line_num,
-                        code_line,
-                        rule,
-                        [],
-                    )
-                    if finding:
-                        findings.append(finding)
-                line_num += 1
-
-        return findings
-
-    def _check_line_against_rule(
-        self,
-        file_path: str,
-        line_num: int,
-        line: str,
-        rule: AntiPatternRule,
-        all_lines: list[str],
-    ) -> ReviewFinding | None:
-        """Check a single line against a rule."""
-        # Check if any detect pattern matches
-        detected = False
-        for pattern in rule.detect_patterns:
-            if re.search(pattern, line):
-                detected = True
-                break
-
-        if not detected:
-            return None
-
-        # Check if any safe pattern is present (in this line or nearby)
-        context_window = 3
-        start = max(0, line_num - context_window - 1)
-        end = min(len(all_lines), line_num + context_window)
-        context = "\n".join(all_lines[start:end]) if all_lines else line
-
-        for safe_pattern in rule.safe_patterns:
-            if re.search(safe_pattern, context):
-                return None  # Safe pattern found, no issue
-
-        # Calculate confidence based on rule quality
-        confidence = 0.7
-        if rule.reference_bugs:
-            confidence += 0.1  # Backed by historical data
-        if len(rule.detect_patterns) > 2:
-            confidence -= 0.1  # More generic rule
-
-        return ReviewFinding(
-            file=file_path,
-            line=line_num,
-            pattern_type=rule.pattern_type,
-            pattern_id=rule.reference_bugs[0] if rule.reference_bugs else "builtin",
-            description=rule.description,
-            historical_cause=(
-                f"Historical: {rule.reference_bugs}" if rule.reference_bugs else "Built-in rule"
-            ),
-            suggestion=rule.fix_suggestion,
-            code_snippet=line.strip()[:80],
-            confidence=min(confidence, 0.95),
-            severity=rule.severity,
-        )
-
-    def _get_staged_files(self) -> list[str]:
-        """Get list of staged files from git."""
-        try:
-            result = subprocess.run(
-                ["git", "diff", "--cached", "--name-only"],
-                check=False,
-                capture_output=True,
-                text=True,
-                timeout=5,
-            )
-            if result.returncode == 0:
-                return [f.strip() for f in result.stdout.strip().split("\n") if f.strip()]
-        except Exception as e:
-            # Optional: Git staged files unavailable (not a git repo)
-            logger.debug(f"Could not get staged files: {e}")
-            pass
-        return []
-
-    def _get_recent_changed_files(self) -> list[str]:
-        """Get recently changed files from git."""
-        try:
-            result = subprocess.run(
-                ["git", "diff", "--name-only", "HEAD~3", "HEAD"],
-                check=False,
-                capture_output=True,
-                text=True,
-                timeout=5,
-            )
-            if result.returncode == 0:
-                return [f.strip() for f in result.stdout.strip().split("\n") if f.strip()]
-        except Exception as e:
-            # Optional: Git history unavailable (not a git repo)
-            logger.debug(f"Could not get recent changed files: {e}")
-            pass
-        return []
-
-    def _count_by_severity(self, findings: list[ReviewFinding]) -> dict[str, int]:
-        """Count findings by severity."""
-        counts: dict[str, int] = {}
-        for f in findings:
-            counts[f.severity] = counts.get(f.severity, 0) + 1
-        return counts
-
-    def _count_by_type(self, findings: list[ReviewFinding]) -> dict[str, int]:
-        """Count findings by pattern type."""
-        counts: dict[str, int] = {}
-        for f in findings:
-            counts[f.pattern_type] = counts.get(f.pattern_type, 0) + 1
-        return counts
-
-    def _calculate_confidence(self, findings: list[ReviewFinding]) -> float:
-        """Calculate overall confidence score."""
-        if not findings:
-            return 1.0  # No issues found with high confidence
-
-        avg_conf = sum(f.confidence for f in findings) / len(findings)
-        return round(avg_conf, 2)
-
-    def _generate_predictions(self, findings: list[ReviewFinding]) -> list[dict[str, Any]]:
-        """Generate Level 4 predictions."""
-        predictions: list[dict[str, Any]] = []
-
-        if not findings:
-            predictions.append(
-                {
-                    "type": "clean_review",
-                    "severity": "info",
-                    "description": "No anti-patterns detected. Code looks clean!",
-                },
-            )
-            return predictions
-
-        # Predict based on finding types
-        type_counts = self._count_by_type(findings)
-        most_common = max(type_counts.items(), key=lambda x: x[1]) if type_counts else None
-
-        if most_common and most_common[1] >= 2:
-            predictions.append(
-                {
-                    "type": "recurring_issue",
-                    "severity": "warning",
-                    "description": f"Multiple {most_common[0]} issues ({most_common[1]}). "
-                    f"This pattern may indicate a systemic problem.",
-                    "prevention_steps": [
-                        f"Add linting rule for {most_common[0]}",
-                        "Consider code review checklist",
-                        "Add unit tests for edge cases",
-                    ],
-                },
-            )
-
-        # High severity findings
-        error_count = sum(1 for f in findings if f.severity == "error")
-        if error_count > 0:
-            predictions.append(
-                {
-                    "type": "high_risk",
-                    "severity": "error",
-                    "description": f"{error_count} high-severity issue(s) found. "
-                    f"These may cause runtime errors.",
-                },
-            )
-
-        return predictions
-
-    def _generate_recommendations(self, findings: list[ReviewFinding]) -> list[str]:
-        """Generate actionable recommendations."""
-        if not findings:
-            return ["No issues found. Proceed with commit."]
-
-        recommendations = []
-
-        # Group by type for consolidated recommendations
-        type_counts = self._count_by_type(findings)
-        for pattern_type, count in sorted(type_counts.items(), key=lambda x: -x[1]):
-            sample = next((f for f in findings if f.pattern_type == pattern_type), None)
-            if sample:
-                recommendations.append(f"Fix {count} {pattern_type} issue(s): {sample.suggestion}")
-
-        # Add general recommendation
-        if len(findings) > 3:
-            recommendations.append("Consider running the full test suite before committing.")
-
-        return recommendations
-
-    def format_terminal_output(self, result: dict) -> str:
-        """Format review results for terminal output."""
-        lines = [
-            "Code Review Results",
-            "=" * 40,
-            "",
-        ]
-
-        findings = result.get("findings", [])
-        if not findings:
-            lines.append("✓ No issues found!")
-            lines.append("")
-            return "\n".join(lines)
-
-        for finding in findings:
-            icon = (
-                "⚠️"
-                if finding["severity"] == "warning"
-                else "❌" if finding["severity"] == "error" else "ℹ️"
-            )
-            lines.append(f"{icon} {finding['file']}:{finding['line']}")
-            lines.append(f" Pattern: {finding['pattern_type']} ({finding['pattern_id']})")
-            lines.append(f" Risk: {finding['description']}")
-            lines.append(f" Historical: {finding['historical_cause']}")
-            lines.append(f" Suggestion: {finding['suggestion']}")
-            lines.append(f" Confidence: {finding['confidence']:.0%}")
-            lines.append("")
-
-        summary = result.get("summary", {})
-        lines.append(
-            f"Summary: {summary.get('total_findings', 0)} findings in {summary.get('files_reviewed', 0)} file(s)",
-        )
-
-        return "\n".join(lines)
-
-
-# CLI support
-if __name__ == "__main__":
-    import asyncio
-    import sys
-
-    async def main():
-        wizard = CodeReviewWizard()
-
-        # Parse simple args
-        files = sys.argv[1:] if len(sys.argv) > 1 else []
-        staged = "--staged" in files
-        files = [f for f in files if not f.startswith("--")]
-
-        result = await wizard.analyze(
-            {
-                "files": files,
-                "staged_only": staged,
-            },
-        )
-
-        print(wizard.format_terminal_output(result))
-
-    asyncio.run(main())
@@ -1,50 +0,0 @@
-"""Advanced Debugging Wizard - Sub-package
-
-Protocol-based debugging using linting configuration pattern.
-
-Copyright 2025 Smart AI Memory, LLC
-Licensed under Fair Source 0.9
-"""
-
-from .bug_risk_analyzer import BugRisk, BugRiskAnalyzer, RiskAssessment
-from .config_loaders import ConfigLoaderFactory, LintConfig, load_config
-from .fix_applier import FixApplierFactory, FixResult, apply_fixes, group_issues_by_fixability
-from .language_patterns import (
-    CrossLanguagePatternLibrary,
-    PatternCategory,
-    UniversalPattern,
-    get_pattern_library,
-)
-from .linter_parsers import LinterParserFactory, LintIssue, Severity, parse_linter_output
-from .verification import VerificationResult, compare_issue_lists, run_linter, verify_fixes
-
-__all__ = [
-    # Risk Analysis (Level 4)
-    "BugRisk",
-    "BugRiskAnalyzer",
-    "ConfigLoaderFactory",
-    "CrossLanguagePatternLibrary",
-    "FixApplierFactory",
-    # Fixing
-    "FixResult",
-    # Config
-    "LintConfig",
-    # Parsing
-    "LintIssue",
-    "LinterParserFactory",
-    "PatternCategory",
-    "RiskAssessment",
-    "Severity",
-    # Cross-Language Patterns (Level 5)
-    "UniversalPattern",
-    # Verification
-    "VerificationResult",
-    "apply_fixes",
-    "compare_issue_lists",
-    "get_pattern_library",
-    "group_issues_by_fixability",
-    "load_config",
-    "parse_linter_output",
-    "run_linter",
-    "verify_fixes",
-]