gitflow-analytics 3.6.2-py3-none-any.whl → 3.7.0-py3-none-any.whl
This diff shows the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- gitflow_analytics/__init__.py +8 -12
- gitflow_analytics/_version.py +1 -1
- gitflow_analytics/cli.py +151 -170
- gitflow_analytics/cli_wizards/install_wizard.py +5 -5
- gitflow_analytics/models/database.py +229 -8
- gitflow_analytics/security/reports/__init__.py +5 -0
- gitflow_analytics/security/reports/security_report.py +358 -0
- {gitflow_analytics-3.6.2.dist-info → gitflow_analytics-3.7.0.dist-info}/METADATA +2 -4
- {gitflow_analytics-3.6.2.dist-info → gitflow_analytics-3.7.0.dist-info}/RECORD +13 -24
- gitflow_analytics/tui/__init__.py +0 -5
- gitflow_analytics/tui/app.py +0 -726
- gitflow_analytics/tui/progress_adapter.py +0 -313
- gitflow_analytics/tui/screens/__init__.py +0 -8
- gitflow_analytics/tui/screens/analysis_progress_screen.py +0 -857
- gitflow_analytics/tui/screens/configuration_screen.py +0 -523
- gitflow_analytics/tui/screens/loading_screen.py +0 -348
- gitflow_analytics/tui/screens/main_screen.py +0 -321
- gitflow_analytics/tui/screens/results_screen.py +0 -735
- gitflow_analytics/tui/widgets/__init__.py +0 -7
- gitflow_analytics/tui/widgets/data_table.py +0 -255
- gitflow_analytics/tui/widgets/export_modal.py +0 -301
- gitflow_analytics/tui/widgets/progress_widget.py +0 -187
- {gitflow_analytics-3.6.2.dist-info → gitflow_analytics-3.7.0.dist-info}/WHEEL +0 -0
- {gitflow_analytics-3.6.2.dist-info → gitflow_analytics-3.7.0.dist-info}/entry_points.txt +0 -0
- {gitflow_analytics-3.6.2.dist-info → gitflow_analytics-3.7.0.dist-info}/licenses/LICENSE +0 -0
- {gitflow_analytics-3.6.2.dist-info → gitflow_analytics-3.7.0.dist-info}/top_level.txt +0 -0
gitflow_analytics/models/database.py

@@ -5,7 +5,7 @@ import os
 import tempfile
 from datetime import datetime, timezone
 from pathlib import Path
-from typing import Any
+from typing import Any, Optional
 
 from sqlalchemy import (
     JSON,
@@ -869,9 +869,30 @@ class WeeklyTrends(Base):
     )
 
 
+class SchemaVersion(Base):
+    """Track database schema versions for automatic migrations.
+
+    WHY: Schema changes (like timezone-aware timestamps) require migration
+    to ensure old cache databases work correctly without user intervention.
+    This table tracks the current schema version to trigger automatic upgrades.
+    """
+
+    __tablename__ = "schema_version"
+
+    id = Column(Integer, primary_key=True)
+    version = Column(String, nullable=False)  # e.g., "2.0"
+    upgraded_at = Column(DateTime(timezone=True), default=utcnow_tz_aware)
+    previous_version = Column(String, nullable=True)
+    migration_notes = Column(String, nullable=True)
+
+
 class Database:
     """Database connection manager with robust permission handling."""
 
+    # Schema version constants
+    CURRENT_SCHEMA_VERSION = "2.0"  # Timezone-aware timestamps
+    LEGACY_SCHEMA_VERSION = "1.0"  # Timezone-naive timestamps
+
     def __init__(self, db_path: Path):
         """
         Initialize database connection with proper error handling.
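For orientation, the sketch below (not part of the diff) shows how the new SchemaVersion model could be queried with a plain SQLAlchemy session to see which schema a cache database is on; the cache path and import location are illustrative assumptions.

```python
# Hypothetical usage sketch; cache path and import path are assumptions.
from pathlib import Path

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from gitflow_analytics.models.database import SchemaVersion  # assumed import

engine = create_engine(f"sqlite:///{Path('.gitflow-cache/cache.db')}")
Session = sessionmaker(bind=engine)

with Session() as session:
    # Versions are appended rather than updated, so the newest row wins.
    latest = (
        session.query(SchemaVersion)
        .order_by(SchemaVersion.id.desc())
        .first()
    )
    if latest is not None:
        print(f"cache schema {latest.version}, upgraded at {latest.upgraded_at}")
    else:
        print("no schema version recorded yet")
```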
@@ -949,10 +970,21 @@ class Database:
             },
         )
 
-        #
-        Base.metadata.create_all(self.engine)
+        # Check schema version BEFORE creating tables to detect legacy databases
         self.SessionLocal = sessionmaker(bind=self.engine)
-
+        needs_migration = self._check_schema_version_before_create()
+
+        # Create/update tables
+        Base.metadata.create_all(self.engine)
+
+        # Perform migration if needed (after tables are created/updated)
+        if needs_migration:
+            self._perform_schema_migration()
+        else:
+            # No migration needed - record current schema version if not already recorded
+            self._ensure_schema_version_recorded()
+
+        # Apply other migrations for existing databases
         self._apply_migrations()
 
         # Test that we can actually write to the database
@@ -988,9 +1020,21 @@ class Database:
             },
         )
 
-
+        # Check schema version BEFORE creating tables to detect legacy databases
         self.SessionLocal = sessionmaker(bind=self.engine)
-
+        needs_migration = self._check_schema_version_before_create()
+
+        # Create/update tables
+        Base.metadata.create_all(self.engine)
+
+        # Perform migration if needed (after tables are created/updated)
+        if needs_migration:
+            self._perform_schema_migration()
+        else:
+            # No migration needed - record current schema version if not already recorded
+            self._ensure_schema_version_recorded()
+
+        # Apply other migrations for existing databases
         self._apply_migrations()
 
         # Test write capability
@@ -1023,9 +1067,21 @@ class Database:
                 "sqlite:///:memory:", connect_args={"check_same_thread": False}
             )
 
-
+            # Check schema version BEFORE creating tables to detect legacy databases
             self.SessionLocal = sessionmaker(bind=self.engine)
-
+            needs_migration = self._check_schema_version_before_create()
+
+            # Create/update tables
+            Base.metadata.create_all(self.engine)
+
+            # Perform migration if needed (after tables are created/updated)
+            if needs_migration:
+                self._perform_schema_migration()
+            else:
+                # No migration needed - record current schema version if not already recorded
+                self._ensure_schema_version_recorded()
+
+            # Apply other migrations for existing databases
             self._apply_migrations()
 
             self.is_readonly_fallback = True
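All three constructor paths above follow the same order: detect a legacy database, run create_all, then migrate or record the version. The following standalone sketch, using only the sqlite3 standard-library module and an assumed cache.db file name, illustrates why the detection must happen before create_all.

```python
# Illustrative sketch of the detection ordering; not package code.
import sqlite3

def table_exists(conn: sqlite3.Connection, name: str) -> bool:
    row = conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name=?", (name,)
    ).fetchone()
    return row is not None

conn = sqlite3.connect("cache.db")  # assumed cache location

# Before create_all: a missing schema_version table plus a populated
# cached_commits table is the signature of a legacy (v1.0) database.
legacy = (
    not table_exists(conn, "schema_version")
    and table_exists(conn, "cached_commits")
    and conn.execute("SELECT COUNT(*) FROM cached_commits").fetchone()[0] > 0
)

# After create_all, schema_version exists everywhere, so the same check could
# no longer tell a fresh database apart from an unmigrated legacy one.
print("needs migration:", legacy)
```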
@@ -1117,9 +1173,174 @@ class Database:
 
     def init_db(self) -> None:
         """Initialize database tables and apply migrations."""
+        needs_migration = self._check_schema_version_before_create()
         Base.metadata.create_all(self.engine)
+        if needs_migration:
+            self._perform_schema_migration()
+        else:
+            self._ensure_schema_version_recorded()
         self._apply_migrations()
 
+    def _check_schema_version_before_create(self) -> bool:
+        """Check if database needs migration BEFORE create_all is called.
+
+        WHY: We need to check for legacy databases BEFORE creating new tables,
+        otherwise we can't distinguish between a fresh database and a legacy one.
+
+        Returns:
+            True if migration is needed, False otherwise
+        """
+        try:
+            with self.engine.connect() as conn:
+                # Check if schema_version table exists
+                result = conn.execute(
+                    text(
+                        "SELECT name FROM sqlite_master WHERE type='table' AND name='schema_version'"
+                    )
+                )
+                schema_table_exists = result.fetchone() is not None
+
+                if schema_table_exists:
+                    # Check current version
+                    result = conn.execute(
+                        text("SELECT version FROM schema_version ORDER BY id DESC LIMIT 1")
+                    )
+                    row = result.fetchone()
+
+                    if row and row[0] != self.CURRENT_SCHEMA_VERSION:
+                        # Version mismatch - needs migration
+                        logger.warning(
+                            f"⚠️ Schema version mismatch: {row[0]} → {self.CURRENT_SCHEMA_VERSION}"
+                        )
+                        return True
+                    # else: Already at current version or no version record yet
+                    return False
+                else:
+                    # No schema_version table - check if this is legacy or new
+                    result = conn.execute(
+                        text(
+                            "SELECT name FROM sqlite_master WHERE type='table' AND name='cached_commits'"
+                        )
+                    )
+                    has_cached_commits = result.fetchone() is not None
+
+                    if has_cached_commits:
+                        # Check if table has data
+                        result = conn.execute(text("SELECT COUNT(*) FROM cached_commits"))
+                        commit_count = result.fetchone()[0]
+
+                        if commit_count > 0:
+                            # Legacy database with data - needs migration
+                            logger.warning("⚠️ Old cache schema detected (v1.0 → v2.0)")
+                            logger.info(" This is a one-time operation due to timezone fix")
+                            return True
+
+                    # New database or empty legacy database - no migration needed
+                    return False
+
+        except Exception as e:
+            # Don't fail initialization due to schema check issues
+            logger.debug(f"Schema version check failed: {e}")
+            return False
+
+    def _perform_schema_migration(self) -> None:
+        """Perform the actual schema migration after tables are created.
+
+        WHY: Separating migration from detection allows us to update table schemas
+        via create_all before clearing/migrating data.
+        """
+        try:
+            with self.engine.connect() as conn:
+                logger.info("🔄 Automatically upgrading cache database...")
+                logger.info(" Clearing old cache data (timezone schema incompatible)...")
+
+                # Clear cached data tables
+                conn.execute(text("DELETE FROM cached_commits"))
+                conn.execute(text("DELETE FROM pull_request_cache"))
+                conn.execute(text("DELETE FROM issue_cache"))
+                conn.execute(text("DELETE FROM repository_analysis_status"))
+
+                # Also clear qualitative analysis data if it exists
+                try:
+                    conn.execute(text("DELETE FROM qualitative_commits"))
+                    conn.execute(text("DELETE FROM pattern_cache"))
+                except Exception:
+                    # These tables might not exist in all databases
+                    pass
+
+                conn.commit()
+
+                # Record the schema upgrade
+                self._record_schema_version(
+                    conn,
+                    self.CURRENT_SCHEMA_VERSION,
+                    self.LEGACY_SCHEMA_VERSION,
+                    "Migrated to timezone-aware timestamps (v2.0)",
+                )
+
+                logger.info(" Migration complete - cache will be rebuilt on next analysis")
+                logger.info("✅ Cache database upgraded successfully")
+
+        except Exception as e:
+            logger.error(f"Migration failed: {e}")
+            # Don't raise - let the system continue and rebuild cache from scratch
+
+    def _ensure_schema_version_recorded(self) -> None:
+        """Ensure schema version is recorded for databases that didn't need migration.
+
+        WHY: Fresh databases and already-migrated databases need to have their
+        schema version recorded for future migration detection.
+        """
+        try:
+            with self.engine.connect() as conn:
+                # Check if version is already recorded
+                result = conn.execute(text("SELECT COUNT(*) FROM schema_version"))
+                count = result.fetchone()[0]
+
+                if count == 0:
+                    # No version recorded - this is a fresh database
+                    self._record_schema_version(
+                        conn, self.CURRENT_SCHEMA_VERSION, None, "Initial schema creation"
+                    )
+                    logger.debug(f"Recorded initial schema version: {self.CURRENT_SCHEMA_VERSION}")
+
+        except Exception as e:
+            # Don't fail if we can't record version
+            logger.debug(f"Could not ensure schema version recorded: {e}")
+
+    def _record_schema_version(
+        self, conn, version: str, previous_version: Optional[str], notes: Optional[str]
+    ) -> None:
+        """Record schema version in the database.
+
+        Args:
+            conn: Database connection
+            version: New schema version
+            previous_version: Previous schema version (None for initial)
+            notes: Migration notes
+        """
+        try:
+            from datetime import datetime, timezone
+
+            # Insert new schema version record
+            conn.execute(
+                text(
+                    """
+                    INSERT INTO schema_version (version, upgraded_at, previous_version, migration_notes)
+                    VALUES (:version, :upgraded_at, :previous_version, :notes)
+                    """
+                ),
+                {
+                    "version": version,
+                    "upgraded_at": datetime.now(timezone.utc),
+                    "previous_version": previous_version,
+                    "notes": notes,
+                },
+            )
+            conn.commit()
+        except Exception as e:
+            logger.debug(f"Could not record schema version: {e}")
+
     def _apply_migrations(self) -> None:
         """Apply database migrations for backward compatibility.
 
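Taken together, these methods make the cache self-upgrading. A hedged usage sketch follows; the cache path is an assumption, while Database(db_path), the engine attribute, and the schema_version table all come from the diff above.

```python
# Hypothetical usage sketch; the cache path is an assumption for illustration.
from pathlib import Path

from sqlalchemy import text

from gitflow_analytics.models.database import Database

# Opening an old (v1.0, timezone-naive) cache triggers the one-time upgrade in
# __init__: cached_* tables are cleared and a "1.0" -> "2.0" schema_version row
# is written; a fresh database just gets an initial "2.0" record.
db = Database(Path(".gitflow-cache/cache.db"))

with db.engine.connect() as conn:
    row = conn.execute(
        text("SELECT version, previous_version FROM schema_version ORDER BY id DESC LIMIT 1")
    ).fetchone()
    print(row)  # e.g. ('2.0', '1.0') after an upgrade, ('2.0', None) when fresh
```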
gitflow_analytics/security/reports/security_report.py (new file)

@@ -0,0 +1,358 @@
+"""Generate security analysis reports."""
+
+import json
+import csv
+from typing import List, Dict, Any, Optional
+from pathlib import Path
+from datetime import datetime
+from ..security_analyzer import SecurityAnalysis
+
+
+class SecurityReportGenerator:
+    """Generate various format reports for security findings."""
+
+    def __init__(self, output_dir: Optional[Path] = None):
+        """Initialize report generator.
+
+        Args:
+            output_dir: Directory for report output
+        """
+        self.output_dir = output_dir or Path("reports")
+        self.output_dir.mkdir(parents=True, exist_ok=True)
+
+    def generate_reports(self, analyses: List[SecurityAnalysis], summary: Dict[str, Any]) -> Dict[str, Path]:
+        """Generate all report formats.
+
+        Args:
+            analyses: List of security analyses
+            summary: Summary statistics
+
+        Returns:
+            Dictionary of report type to file path
+        """
+        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+        reports = {}
+
+        # Generate Markdown report
+        md_path = self.output_dir / f"security_report_{timestamp}.md"
+        self._generate_markdown_report(analyses, summary, md_path)
+        reports["markdown"] = md_path
+
+        # Generate JSON report
+        json_path = self.output_dir / f"security_findings_{timestamp}.json"
+        self._generate_json_report(analyses, summary, json_path)
+        reports["json"] = json_path
+
+        # Generate CSV report
+        csv_path = self.output_dir / f"security_issues_{timestamp}.csv"
+        self._generate_csv_report(analyses, csv_path)
+        reports["csv"] = csv_path
+
+        # Generate SARIF report if requested
+        if any(a.total_findings > 0 for a in analyses):
+            sarif_path = self.output_dir / f"security_sarif_{timestamp}.json"
+            self._generate_sarif_report(analyses, sarif_path)
+            reports["sarif"] = sarif_path
+
+        return reports
+
+    def _generate_markdown_report(self, analyses: List[SecurityAnalysis], summary: Dict, path: Path) -> None:
+        """Generate comprehensive Markdown security report."""
+        with open(path, 'w') as f:
+            # Header
+            f.write("# 🔒 Security Analysis Report\n\n")
+            f.write(f"**Generated**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n")
+
+            # Executive Summary
+            f.write("## 📊 Executive Summary\n\n")
+            f.write(f"- **Commits Analyzed**: {summary['total_commits']}\n")
+            f.write(f"- **Commits with Issues**: {summary['commits_with_issues']}\n")
+            f.write(f"- **Total Findings**: {summary['total_findings']}\n")
+            f.write(f"- **Risk Level**: **{summary['risk_level']}** (Score: {summary['average_risk_score']})\n\n")
+
+            # Risk Assessment
+            self._write_risk_assessment(f, summary)
+
+            # Severity Distribution
+            f.write("## 🎯 Severity Distribution\n\n")
+            severity = summary['severity_distribution']
+            if severity['critical'] > 0:
+                f.write(f"- 🔴 **Critical**: {severity['critical']}\n")
+            if severity['high'] > 0:
+                f.write(f"- 🟠 **High**: {severity['high']}\n")
+            if severity['medium'] > 0:
+                f.write(f"- 🟡 **Medium**: {severity['medium']}\n")
+            if severity['low'] > 0:
+                f.write(f"- 🟢 **Low**: {severity['low']}\n")
+            f.write("\n")
+
+            # Top Issues
+            if summary['top_issues']:
+                f.write("## 🔝 Top Security Issues\n\n")
+                f.write("| Issue Type | Severity | Occurrences | Affected Files |\n")
+                f.write("|------------|----------|-------------|----------------|\n")
+                for issue in summary['top_issues']:
+                    f.write(f"| {issue['type']} | {issue['severity'].upper()} | "
+                            f"{issue['occurrences']} | {issue['affected_files']} |\n")
+                f.write("\n")
+
+            # Detailed Findings by Category
+            self._write_detailed_findings(f, analyses)
+
+            # LLM Insights
+            if 'llm_insights' in summary and summary['llm_insights']:
+                f.write("## 🤖 AI Security Insights\n\n")
+                f.write(summary['llm_insights'])
+                f.write("\n\n")
+
+            # Recommendations
+            f.write("## 💡 Recommendations\n\n")
+            for rec in summary['recommendations']:
+                f.write(f"- {rec}\n")
+            f.write("\n")
+
+            # Appendix - All Findings
+            f.write("## 📋 Detailed Findings\n\n")
+            self._write_all_findings(f, analyses)
+
+    def _write_risk_assessment(self, f, summary: Dict) -> None:
+        """Write risk assessment section."""
+        risk_level = summary['risk_level']
+        score = summary['average_risk_score']
+
+        f.write("## ⚠️ Risk Assessment\n\n")
+
+        if risk_level == "CRITICAL":
+            f.write("### 🚨 CRITICAL RISK DETECTED\n\n")
+            f.write("Immediate action required. Critical security vulnerabilities have been identified "
+                    "that could lead to severe security breaches.\n\n")
+        elif risk_level == "HIGH":
+            f.write("### 🔴 High Risk\n\n")
+            f.write("Significant security issues detected that should be addressed urgently.\n\n")
+        elif risk_level == "MEDIUM":
+            f.write("### 🟡 Medium Risk\n\n")
+            f.write("Moderate security concerns identified that should be addressed in the near term.\n\n")
+        else:
+            f.write("### 🟢 Low Risk\n\n")
+            f.write("Minor security issues detected. Continue with regular security practices.\n\n")
+
+        # Risk score visualization
+        f.write("**Risk Score Breakdown**:\n")
+        f.write("```\n")
+        bar_length = 50
+        filled = int(score / 100 * bar_length)
+        bar = "█" * filled + "░" * (bar_length - filled)
+        f.write(f"[{bar}] {score:.1f}/100\n")
+        f.write("```\n\n")
+
+    def _write_detailed_findings(self, f, analyses: List[SecurityAnalysis]) -> None:
+        """Write detailed findings by category."""
+        # Aggregate findings
+        all_secrets = []
+        all_vulnerabilities = []
+        all_dependencies = []
+        all_llm = []
+
+        for analysis in analyses:
+            all_secrets.extend(analysis.secrets)
+            all_vulnerabilities.extend(analysis.vulnerabilities)
+            all_dependencies.extend(analysis.dependency_issues)
+            all_llm.extend(analysis.llm_findings)
+
+        # Secrets Section
+        if all_secrets:
+            f.write("## 🔑 Exposed Secrets\n\n")
+            f.write(f"**Total**: {len(all_secrets)} potential secrets detected\n\n")
+
+            # Group by secret type
+            by_type = {}
+            for secret in all_secrets:
+                secret_type = secret.get('secret_type', 'unknown')
+                if secret_type not in by_type:
+                    by_type[secret_type] = []
+                by_type[secret_type].append(secret)
+
+            for secret_type, secrets in sorted(by_type.items()):
+                f.write(f"### {secret_type.replace('_', ' ').title()}\n")
+                for s in secrets[:5]:  # Show first 5 of each type
+                    f.write(f"- **File**: `{s.get('file', 'unknown')}`\n")
+                    f.write(f"  - Line: {s.get('line', 'N/A')}\n")
+                    f.write(f"  - Pattern: `{s.get('match', 'N/A')}`\n")
+                if len(secrets) > 5:
+                    f.write(f"  - *... and {len(secrets) - 5} more*\n")
+                f.write("\n")
+
+        # Vulnerabilities Section
+        if all_vulnerabilities:
+            f.write("## 🛡️ Code Vulnerabilities\n\n")
+            f.write(f"**Total**: {len(all_vulnerabilities)} vulnerabilities detected\n\n")
+
+            # Group by vulnerability type
+            by_type = {}
+            for vuln in all_vulnerabilities:
+                vuln_type = vuln.get('vulnerability_type', 'unknown')
+                if vuln_type not in by_type:
+                    by_type[vuln_type] = []
+                by_type[vuln_type].append(vuln)
+
+            for vuln_type, vulns in sorted(by_type.items()):
+                f.write(f"### {vuln_type.replace('_', ' ').title()}\n")
+                for v in vulns[:5]:
+                    f.write(f"- **File**: `{v.get('file', 'unknown')}:{v.get('line', 'N/A')}`\n")
+                    f.write(f"  - Tool: {v.get('tool', 'N/A')}\n")
+                    f.write(f"  - Message: {v.get('message', 'N/A')}\n")
+                if len(vulns) > 5:
+                    f.write(f"  - *... and {len(vulns) - 5} more*\n")
+                f.write("\n")
+
+        # Dependencies Section
+        if all_dependencies:
+            f.write("## 📦 Vulnerable Dependencies\n\n")
+            f.write(f"**Total**: {len(all_dependencies)} vulnerable dependencies\n\n")
+
+            for dep in all_dependencies[:10]:
+                f.write(f"- **{dep.get('package', 'unknown')}** @ {dep.get('version', 'unknown')}\n")
+                f.write(f"  - File: `{dep.get('file', 'unknown')}`\n")
+                if dep.get('cve'):
+                    f.write(f"  - CVE: {dep['cve']}\n")
+                f.write(f"  - Message: {dep.get('message', 'N/A')}\n")
+            if len(all_dependencies) > 10:
+                f.write(f"\n*... and {len(all_dependencies) - 10} more vulnerable dependencies*\n")
+            f.write("\n")
+
+    def _write_all_findings(self, f, analyses: List[SecurityAnalysis]) -> None:
+        """Write all findings in detail."""
+        for analysis in analyses:
+            if analysis.total_findings == 0:
+                continue
+
+            f.write(f"### Commit: `{analysis.commit_hash[:8]}`\n")
+            f.write(f"**Time**: {analysis.timestamp.strftime('%Y-%m-%d %H:%M:%S')}\n")
+            f.write(f"**Files Changed**: {len(analysis.files_changed)}\n")
+            f.write(f"**Risk Score**: {analysis.risk_score:.1f}\n\n")
+
+            if analysis.secrets:
+                f.write("**Secrets**:\n")
+                for s in analysis.secrets:
+                    f.write(f"- {s.get('secret_type', 'unknown')}: {s.get('file', 'N/A')}\n")
+
+            if analysis.vulnerabilities:
+                f.write("**Vulnerabilities**:\n")
+                for v in analysis.vulnerabilities:
+                    f.write(f"- {v.get('vulnerability_type', 'unknown')}: {v.get('file', 'N/A')}\n")
+
+            f.write("\n---\n\n")
+
+    def _generate_json_report(self, analyses: List[SecurityAnalysis], summary: Dict, path: Path) -> None:
+        """Generate JSON report with all findings."""
+        report = {
+            "metadata": {
+                "generated": datetime.now().isoformat(),
+                "version": "1.0.0"
+            },
+            "summary": summary,
+            "analyses": []
+        }
+
+        for analysis in analyses:
+            report["analyses"].append({
+                "commit_hash": analysis.commit_hash,
+                "timestamp": analysis.timestamp.isoformat(),
+                "files_changed": analysis.files_changed,
+                "risk_score": analysis.risk_score,
+                "findings": {
+                    "secrets": analysis.secrets,
+                    "vulnerabilities": analysis.vulnerabilities,
+                    "dependency_issues": analysis.dependency_issues,
+                    "llm_findings": analysis.llm_findings
+                },
+                "metrics": {
+                    "total": analysis.total_findings,
+                    "critical": analysis.critical_count,
+                    "high": analysis.high_count,
+                    "medium": analysis.medium_count,
+                    "low": analysis.low_count
+                }
+            })
+
+        with open(path, 'w') as f:
+            json.dump(report, f, indent=2)
+
+    def _generate_csv_report(self, analyses: List[SecurityAnalysis], path: Path) -> None:
+        """Generate CSV report of all findings."""
+        with open(path, 'w', newline='') as f:
+            writer = csv.DictWriter(f, fieldnames=[
+                'commit_hash', 'timestamp', 'type', 'severity',
+                'category', 'file', 'line', 'message', 'tool', 'confidence'
+            ])
+            writer.writeheader()
+
+            for analysis in analyses:
+                # Write all findings
+                for finding in (analysis.secrets + analysis.vulnerabilities +
+                                analysis.dependency_issues + analysis.llm_findings):
+                    writer.writerow({
+                        'commit_hash': analysis.commit_hash[:8],
+                        'timestamp': analysis.timestamp.isoformat(),
+                        'type': finding.get('type', 'unknown'),
+                        'severity': finding.get('severity', 'medium'),
+                        'category': finding.get('vulnerability_type',
+                                                finding.get('secret_type', 'unknown')),
+                        'file': finding.get('file', ''),
+                        'line': finding.get('line', ''),
+                        'message': finding.get('message', ''),
+                        'tool': finding.get('tool', finding.get('source', '')),
+                        'confidence': finding.get('confidence', '')
+                    })
+
+    def _generate_sarif_report(self, analyses: List[SecurityAnalysis], path: Path) -> None:
+        """Generate SARIF format report for GitHub Security tab integration."""
+        sarif = {
+            "$schema": "https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json",
+            "version": "2.1.0",
+            "runs": [{
+                "tool": {
+                    "driver": {
+                        "name": "GitFlow Analytics Security",
+                        "version": "1.0.0",
+                        "informationUri": "https://github.com/yourusername/gitflow-analytics"
+                    }
+                },
+                "results": []
+            }]
+        }
+
+        for analysis in analyses:
+            for finding in (analysis.secrets + analysis.vulnerabilities):
+                result = {
+                    "ruleId": finding.get('vulnerability_type',
+                                          finding.get('secret_type', 'unknown')),
+                    "level": self._severity_to_sarif_level(finding.get('severity', 'medium')),
+                    "message": {
+                        "text": finding.get('message', 'Security issue detected')
+                    },
+                    "locations": [{
+                        "physicalLocation": {
+                            "artifactLocation": {
+                                "uri": finding.get('file', 'unknown')
+                            },
+                            "region": {
+                                "startLine": finding.get('line', 1)
+                            }
+                        }
+                    }]
+                }
+                sarif["runs"][0]["results"].append(result)
+
+        with open(path, 'w') as f:
+            json.dump(sarif, f, indent=2)
+
+    def _severity_to_sarif_level(self, severity: str) -> str:
+        """Convert severity to SARIF level."""
+        mapping = {
+            "critical": "error",
+            "high": "error",
+            "medium": "warning",
+            "low": "note"
+        }
+        return mapping.get(severity.lower(), "warning")
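A possible way to drive the new generator is sketched below; a types.SimpleNamespace stands in for SecurityAnalysis and carries only the attributes the report writers read, and the sample finding data is invented for illustration.

```python
# Hypothetical driver sketch; SimpleNamespace substitutes for SecurityAnalysis.
from datetime import datetime, timezone
from pathlib import Path
from types import SimpleNamespace

from gitflow_analytics.security.reports.security_report import SecurityReportGenerator

analysis = SimpleNamespace(
    commit_hash="deadbeefcafe",
    timestamp=datetime.now(timezone.utc),
    files_changed=["src/app.py"],
    risk_score=42.0,
    secrets=[{"secret_type": "aws_key", "file": "src/app.py", "line": 10, "match": "AKIA..."}],
    vulnerabilities=[],
    dependency_issues=[],
    llm_findings=[],
    total_findings=1,
    critical_count=0, high_count=1, medium_count=0, low_count=0,
)

summary = {
    "total_commits": 1,
    "commits_with_issues": 1,
    "total_findings": 1,
    "risk_level": "HIGH",
    "average_risk_score": 42.0,
    "severity_distribution": {"critical": 0, "high": 1, "medium": 0, "low": 0},
    "top_issues": [],
    "recommendations": ["Rotate the exposed AWS key"],
}

# Writes markdown, JSON, CSV, and (since findings exist) SARIF files.
paths = SecurityReportGenerator(Path("reports")).generate_reports([analysis], summary)
print(paths)
```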
gitflow_analytics-3.7.0.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: gitflow-analytics
-Version: 3.6.2
+Version: 3.7.0
 Summary: Analyze Git repositories for developer productivity insights
 Author-email: Bob Matyas <bobmatnyc@gmail.com>
 License: MIT
@@ -52,10 +52,8 @@ Requires-Dist: types-PyYAML>=6.0; extra == "dev"
 Requires-Dist: types-requests>=2.28; extra == "dev"
 Provides-Extra: github
 Requires-Dist: pygithub>=1.58; extra == "github"
-Provides-Extra: tui
-Requires-Dist: textual>=0.41.0; extra == "tui"
 Provides-Extra: all
-Requires-Dist: gitflow-analytics[github
+Requires-Dist: gitflow-analytics[github]; extra == "all"
 Dynamic: license-file
 
 # GitFlow Analytics