iam-policy-validator 1.14.4__py3-none-any.whl → 1.14.6__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- {iam_policy_validator-1.14.4.dist-info → iam_policy_validator-1.14.6.dist-info}/METADATA +1 -1
- {iam_policy_validator-1.14.4.dist-info → iam_policy_validator-1.14.6.dist-info}/RECORD +10 -10
- iam_validator/__version__.py +1 -1
- iam_validator/core/label_manager.py +30 -8
- iam_validator/core/pr_commenter.py +11 -1
- iam_validator/core/report.py +106 -28
- iam_validator/integrations/github_integration.py +20 -0
- {iam_policy_validator-1.14.4.dist-info → iam_policy_validator-1.14.6.dist-info}/WHEEL +0 -0
- {iam_policy_validator-1.14.4.dist-info → iam_policy_validator-1.14.6.dist-info}/entry_points.txt +0 -0
- {iam_policy_validator-1.14.4.dist-info → iam_policy_validator-1.14.6.dist-info}/licenses/LICENSE +0 -0
{iam_policy_validator-1.14.4.dist-info → iam_policy_validator-1.14.6.dist-info}/METADATA
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: iam-policy-validator
-Version: 1.14.4
+Version: 1.14.6
 Summary: Validate AWS IAM policies for correctness and security using AWS Service Reference API
 Project-URL: Homepage, https://github.com/boogy/iam-policy-validator
 Project-URL: Documentation, https://github.com/boogy/iam-policy-validator/tree/main/docs

{iam_policy_validator-1.14.4.dist-info → iam_policy_validator-1.14.6.dist-info}/RECORD
RENAMED
@@ -1,6 +1,6 @@
 iam_validator/__init__.py,sha256=xHdUASOxFHwEXfT_GSr_KrkLlnxZ-pAAr1wW1PwAGko,693
 iam_validator/__main__.py,sha256=to_nz3n_IerJpVVZZ6WSFlFR5s_06J0csfPOTfQZG8g,197
-iam_validator/__version__.py,sha256=
+iam_validator/__version__.py,sha256=WpHTF6NviSa8VivWMnhlxgCYH_CGf0PJr8rvIQ0xQuQ,374
 iam_validator/checks/__init__.py,sha256=OTkPnmlelu4YjMO8krjhu2wXiTV72RzopA5u1SfPQA0,1990
 iam_validator/checks/action_condition_enforcement.py,sha256=2-XUMbof9tQ7SHZNmAHMkR1DgbOIzY2eFWlp9S9dwLk,60625
 iam_validator/checks/action_resource_matching.py,sha256=qND0hfDgNoxFEdLWwrxOPVDfdj3k50nzedT2qF7nK7o,19428
@@ -49,12 +49,12 @@ iam_validator/core/finding_fingerprint.py,sha256=NJIlu8NhdenWbLS7ww8LyWFasJgpKWN
 iam_validator/core/ignore_patterns.py,sha256=pZqDJBtkbck-85QK5eFPM5ZOPEKs3McRh3avqiCT5z0,10398
 iam_validator/core/ignore_processor.py,sha256=zgWfS-4BU4c_W6VxUxHIHorMtB5XzB410wZ3bbzVgH8,10686
 iam_validator/core/ignored_findings.py,sha256=b4PySz46so1rGKNt4prg2dkysHPfTJP4wsHYorVn1FA,12756
-iam_validator/core/label_manager.py,sha256=
+iam_validator/core/label_manager.py,sha256=qKQ60shsW8yJELkHgd9rXgzLW9oKErPd4hFTTQkHjbI,8776
 iam_validator/core/models.py,sha256=lXUadIsTpp_j0Vt89Ez7aJkTKs2GD2ty3Ukl2NeY9Zo,15680
 iam_validator/core/policy_checks.py,sha256=FNVuS2GTffwCjjrlupVIazC172gSxKYAAT_ObV6Apbo,8803
 iam_validator/core/policy_loader.py,sha256=iid3mGfDzSXASzKDqbLnrqJHBdVQvvebofVqNImsGKM,29201
-iam_validator/core/pr_commenter.py,sha256=
-iam_validator/core/report.py,sha256=
+iam_validator/core/pr_commenter.py,sha256=IZu2FQqzw73U_8ugTUq197ECLqk9mRCQpTWXPu5qk0k,35490
+iam_validator/core/report.py,sha256=IDjBjSrzrE_JdkA8eTiSxyUL3g36sJMhTkxehEYzuBQ,45476
 iam_validator/core/aws_service/__init__.py,sha256=UqMh4HUdGlx2QF5OoueJJ2UlCnhX4QW_x3KeE_bxRQc,735
 iam_validator/core/aws_service/cache.py,sha256=DPuOOPPJC867KAYgV1e0RyQs_k3mtefMdYli3jPaN64,3589
 iam_validator/core/aws_service/client.py,sha256=Zv7rIpEFdUCDXKGp3migPDkj8L5eZltgrGe64M2t2Ko,7336
@@ -85,7 +85,7 @@ iam_validator/core/formatters/json.py,sha256=A7gZ8P32GEdbDvrSn6v56yQ4fOP_kyMaoFV
 iam_validator/core/formatters/markdown.py,sha256=dk4STeY-tOEZsVrlmolIEqZvWYP9JhRtygxxNA49DEE,2293
 iam_validator/core/formatters/sarif.py,sha256=03MHSyuZm9FlzaPeWg7wH-UTzzCDhSy6vMPrFpFNkS8,18884
 iam_validator/integrations/__init__.py,sha256=7Hlor_X9j0NZaEjFuSvoXAAuSKQ-zgY19Rk-Dz3JpKo,616
-iam_validator/integrations/github_integration.py,sha256=
+iam_validator/integrations/github_integration.py,sha256=0aeQ_RPTZf5ij7dsBjmtIDz4oHl0BXLno9GperFzTbc,69004
 iam_validator/integrations/ms_teams.py,sha256=t2PlWuTDb6GGH-eDU1jnOKd8D1w4FCB68bahGA7MJcE,14475
 iam_validator/sdk/__init__.py,sha256=AZLnfdn3A9AWb0pMhsbu3GAOAzt6rV7Fi3E3d9_3ZdI,6388
 iam_validator/sdk/arn_matching.py,sha256=HSDpLltOYISq-SoPebAlM89mKOaUaghq_04urchEFDA,12778
@@ -99,8 +99,8 @@ iam_validator/utils/__init__.py,sha256=NveA2F3G1E6-ANZzFr7J6Q6u5mogvMp862iFokmYu
 iam_validator/utils/cache.py,sha256=wOQKOBeoG6QqC5f0oXcHz63Cjtu_-SsSS-0pTSwyAiM,3254
 iam_validator/utils/regex.py,sha256=xHoMECttb7qaMhts-c9b0GIxdhHNZTt-UBr7wNhWfzg,6219
 iam_validator/utils/terminal.py,sha256=FsRaRMH_JAyDgXWBCOgOEhbS89cs17HCmKYoughq5io,724
-iam_policy_validator-1.14.
-iam_policy_validator-1.14.
-iam_policy_validator-1.14.
-iam_policy_validator-1.14.
-iam_policy_validator-1.14.
+iam_policy_validator-1.14.6.dist-info/METADATA,sha256=pAPaTarqbLi7q_cdHu3bkCVeu9BHed34jIQHgMXlj5I,34456
+iam_policy_validator-1.14.6.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+iam_policy_validator-1.14.6.dist-info/entry_points.txt,sha256=8HtWd8O7mvPiPdZR5YbzY8or_qcqLM4-pKaFdhtFT8M,62
+iam_policy_validator-1.14.6.dist-info/licenses/LICENSE,sha256=AMnbFTBDcK4_MITe2wiQBkj0vg-jjBBhsc43ydC7tt4,1098
+iam_policy_validator-1.14.6.dist-info/RECORD,,
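
Each RECORD entry above has the form path,sha256=<digest>,<size>, where the digest is an unpadded URL-safe base64 encoding of the file's SHA-256. A minimal sketch of how such an entry can be reproduced (the record_entry helper and the example path are illustrative only):

import base64
import hashlib
from pathlib import Path

def record_entry(path: str) -> str:
    """Build a wheel RECORD line: path,sha256=<urlsafe-b64 digest>,<size>."""
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
    return f"{path},sha256={digest},{len(data)}"

# e.g. record_entry("iam_validator/__version__.py") should reproduce the RECORD line above
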
iam_validator/__version__.py
CHANGED
@@ -3,7 +3,7 @@
 This file is the single source of truth for the package version.
 """
 
-__version__ = "1.14.4"
+__version__ = "1.14.6"
 # Parse version, handling pre-release suffixes like -rc, -alpha, -beta
 _version_base = __version__.split("-", maxsplit=1)[0]  # Remove pre-release suffix if present
 __version_info__ = tuple(int(part) for part in _version_base.split("."))
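
As a quick illustration of the parsing logic above: the version is split on "-" first, so a pre-release suffix never reaches int(). The "1.15.0-rc1" value below is hypothetical.

# __version__ = "1.14.6" yields __version_info__ == (1, 14, 6); pre-releases still parse:
_version_base = "1.15.0-rc1".split("-", maxsplit=1)[0]  # -> "1.15.0"
version_info = tuple(int(part) for part in _version_base.split("."))
assert version_info == (1, 15, 0)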

iam_validator/core/label_manager.py
CHANGED
@@ -6,10 +6,11 @@ When those severities are not found, it removes the labels if present.
 """
 
 import logging
+from collections.abc import Callable
 from typing import TYPE_CHECKING
 
 if TYPE_CHECKING:
-    from iam_validator.core.models import PolicyValidationResult, ValidationReport
+    from iam_validator.core.models import PolicyValidationResult, ValidationIssue, ValidationReport
     from iam_validator.integrations.github_integration import GitHubIntegration
 
 logger = logging.getLogger(__name__)
@@ -48,11 +49,17 @@ class LabelManager:
         """
         return bool(self.severity_labels) and self.github.is_configured()
 
-    def _get_severities_in_results(
+    def _get_severities_in_results(
+        self,
+        results: list["PolicyValidationResult"],
+        is_issue_ignored: Callable[["ValidationIssue", str], bool] | None = None,
+    ) -> set[str]:
         """Extract all severity levels found in validation results.
 
         Args:
             results: List of PolicyValidationResult objects
+            is_issue_ignored: Optional callback to check if an issue is ignored.
+                Takes (issue, file_path) and returns True if ignored.
 
         Returns:
             Set of severity levels found (e.g., {"error", "critical", "high"})
@@ -60,6 +67,9 @@ class LabelManager:
         severities = set()
         for result in results:
             for issue in result.issues:
+                # Skip ignored issues if a filter is provided
+                if is_issue_ignored and is_issue_ignored(issue, result.policy_file):
+                    continue
                 severities.add(issue.severity)
         return severities
 
@@ -113,17 +123,22 @@ class LabelManager:
         return labels_to_remove
 
     async def manage_labels_from_results(
-        self,
+        self,
+        results: list["PolicyValidationResult"],
+        is_issue_ignored: Callable[["ValidationIssue", str], bool] | None = None,
     ) -> tuple[bool, int, int]:
         """Manage PR labels based on validation results.
 
         This method will:
-        1. Determine which severity levels are present in the results
+        1. Determine which severity levels are present in the results (excluding ignored issues)
         2. Add labels for severities that are found
         3. Remove labels for severities that are not found
 
         Args:
             results: List of PolicyValidationResult objects
+            is_issue_ignored: Optional callback to check if an issue is ignored.
+                Takes (issue, file_path) and returns True if ignored.
+                Ignored issues are excluded from label determination.
 
         Returns:
             Tuple of (success, labels_added, labels_removed)
@@ -132,8 +147,8 @@ class LabelManager:
             logger.debug("Label management not enabled (no severity_labels configured)")
             return (True, 0, 0)
 
-        # Get all severities found in results
-        found_severities = self._get_severities_in_results(results)
+        # Get all severities found in results (excluding ignored issues)
+        found_severities = self._get_severities_in_results(results, is_issue_ignored)
         logger.debug(f"Found severities in results: {found_severities}")
 
         # Determine which labels to apply/remove
@@ -182,7 +197,11 @@ class LabelManager:
 
         return (success, added_count, removed_count)
 
-    async def manage_labels_from_report(
+    async def manage_labels_from_report(
+        self,
+        report: "ValidationReport",
+        is_issue_ignored: Callable[["ValidationIssue", str], bool] | None = None,
+    ) -> tuple[bool, int, int]:
         """Manage PR labels based on validation report.
 
         This is a convenience method that extracts results from the report
@@ -190,8 +209,11 @@ class LabelManager:
 
         Args:
             report: ValidationReport object
+            is_issue_ignored: Optional callback to check if an issue is ignored.
+                Takes (issue, file_path) and returns True if ignored.
+                Ignored issues are excluded from label determination.
 
         Returns:
             Tuple of (success, labels_added, labels_removed)
         """
-        return await self.manage_labels_from_results(report.results)
+        return await self.manage_labels_from_results(report.results, is_issue_ignored)
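
A minimal sketch of how a caller might wire up the new is_issue_ignored hook. Only the signatures shown in this diff are taken as given; the update_pr_labels wrapper, the ignored store, and the severity-label mapping are hypothetical placeholders (the real caller is PRCommenter, shown next).

from iam_validator.core.label_manager import LabelManager

async def update_pr_labels(github, severity_labels, report, ignored):
    # Hypothetical caller; 'ignored' stands in for whatever ignore store a real
    # integration uses. Only the (issue, file_path) -> bool contract matters here.
    def is_issue_ignored(issue, file_path: str) -> bool:
        return (file_path, issue.severity) in ignored

    label_manager = LabelManager(github, severity_labels)
    return await label_manager.manage_labels_from_report(
        report, is_issue_ignored=is_issue_ignored
    )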

iam_validator/core/pr_commenter.py
CHANGED
@@ -196,7 +196,17 @@ class PRCommenter:
         # Manage PR labels based on severity findings
         if manage_labels and self.severity_labels:
             label_manager = LabelManager(self.github, self.severity_labels)
-
+
+            # Create a filter function that uses relative paths for ignored finding lookup
+            def is_issue_ignored_for_labels(issue: ValidationIssue, file_path: str) -> bool:
+                relative_path = self._make_relative_path(file_path)
+                if not relative_path:
+                    return False
+                return self._is_issue_ignored(issue, relative_path)
+
+            label_success, added, removed = await label_manager.manage_labels_from_report(
+                report, is_issue_ignored=is_issue_ignored_for_labels
+            )
 
             if not label_success:
                 logger.error("Failed to manage PR labels")
iam_validator/core/report.py
CHANGED
@@ -478,13 +478,14 @@ class ReportGenerator:
 
         # Issue breakdown
         if report.total_issues > 0:
-            # Count issues -
-
-                1
-                for r in report.results
-                for i in r.issues
-                if i.severity in constants.HIGH_SEVERITY_LEVELS
+            # Count issues - separate validity errors from security findings
+            validity_errors = sum(
+                1 for r in report.results for i in r.issues if i.severity == "error"
             )
+            critical_findings = sum(
+                1 for r in report.results for i in r.issues if i.severity == "critical"
+            )
+            high_findings = sum(1 for r in report.results for i in r.issues if i.severity == "high")
             warnings = sum(
                 1 for r in report.results for i in r.issues if i.severity in ("warning", "medium")
             )
@@ -496,8 +497,12 @@ class ReportGenerator:
             lines.append("")
             lines.append("| Severity | Count |")
             lines.append("|----------|------:|")
-            if
-                lines.append(f"| 🔴 **Errors** | {
+            if validity_errors > 0:
+                lines.append(f"| 🔴 **Errors** | {validity_errors} |")
+            if critical_findings > 0:
+                lines.append(f"| 🟣 **Critical** | {critical_findings} |")
+            if high_findings > 0:
+                lines.append(f"| 🔶 **High** | {high_findings} |")
             if warnings > 0:
                 lines.append(f"| 🟡 **Warnings** | {warnings} |")
             if infos > 0:
@@ -578,15 +583,31 @@ class ReportGenerator:
             )
             lines.append("")
 
-            # Group issues by severity -
-
+            # Group issues by severity - separate validity errors from security findings
+            validity_errors = [i for i in result.issues if i.severity == "error"]
+            critical_findings = [i for i in result.issues if i.severity == "critical"]
+            high_findings = [i for i in result.issues if i.severity == "high"]
             warnings = [i for i in result.issues if i.severity in constants.MEDIUM_SEVERITY_LEVELS]
             infos = [i for i in result.issues if i.severity in constants.LOW_SEVERITY_LEVELS]
 
-            if
+            if validity_errors:
                 lines.append("### 🔴 Errors")
                 lines.append("")
-                for issue in
+                for issue in validity_errors:
+                    lines.append(self._format_issue_markdown(issue, result.policy_file))
+                    lines.append("")
+
+            if critical_findings:
+                lines.append("### 🟣 Critical")
+                lines.append("")
+                for issue in critical_findings:
+                    lines.append(self._format_issue_markdown(issue, result.policy_file))
+                    lines.append("")
+
+            if high_findings:
+                lines.append("### 🔶 High")
+                lines.append("")
+                for issue in high_findings:
                     lines.append(self._format_issue_markdown(issue, result.policy_file))
                     lines.append("")
 
@@ -693,13 +714,14 @@ class ReportGenerator:
 
         # Issue breakdown
         if report.total_issues > 0:
-            # Count issues -
-
-                1
-                for r in report.results
-                for i in r.issues
-                if i.severity in constants.HIGH_SEVERITY_LEVELS
+            # Count issues - separate validity errors from security findings
+            validity_errors = sum(
+                1 for r in report.results for i in r.issues if i.severity == "error"
             )
+            critical_findings = sum(
+                1 for r in report.results for i in r.issues if i.severity == "critical"
+            )
+            high_findings = sum(1 for r in report.results for i in r.issues if i.severity == "high")
             warnings = sum(
                 1 for r in report.results for i in r.issues if i.severity in ("warning", "medium")
             )
@@ -711,8 +733,12 @@ class ReportGenerator:
             lines.append("")
             lines.append("| Severity | Count |")
             lines.append("|----------|------:|")
-            if
-                lines.append(f"| 🔴 **Errors** | {
+            if validity_errors > 0:
+                lines.append(f"| 🔴 **Errors** | {validity_errors} |")
+            if critical_findings > 0:
+                lines.append(f"| 🟣 **Critical** | {critical_findings} |")
+            if high_findings > 0:
+                lines.append(f"| 🔶 **High** | {high_findings} |")
             if warnings > 0:
                 lines.append(f"| 🟡 **Warnings** | {warnings} |")
             if infos > 0:
@@ -789,15 +815,21 @@ class ReportGenerator:
 
         policy_lines = []
 
-        # Group issues by severity -
-
+        # Group issues by severity - separate validity errors from security findings
+        validity_errors = [i for i in result.issues if i.severity == "error"]
+        critical_findings = [i for i in result.issues if i.severity == "critical"]
+        high_findings = [i for i in result.issues if i.severity == "high"]
         warnings = [i for i in result.issues if i.severity in ("warning", "medium")]
        infos = [i for i in result.issues if i.severity in ("info", "low")]
 
         # Build severity summary for header
         severity_parts = []
-        if
-            severity_parts.append(f"🔴 {len(
+        if validity_errors:
+            severity_parts.append(f"🔴 {len(validity_errors)}")
+        if critical_findings:
+            severity_parts.append(f"🟣 {len(critical_findings)}")
+        if high_findings:
+            severity_parts.append(f"🔶 {len(high_findings)}")
         if warnings:
             severity_parts.append(f"🟡 {len(warnings)}")
         if infos:
@@ -812,11 +844,57 @@ class ReportGenerator:
             )
             policy_lines.append("")
 
-            # Add errors (prioritized)
-            if
+            # Add validity errors (prioritized)
+            if validity_errors:
                 policy_lines.append("### 🔴 Errors")
                 policy_lines.append("")
-                for i, issue in enumerate(
+                for i, issue in enumerate(validity_errors):
+                    issue_content = self._format_issue_markdown(issue, result.policy_file)
+                    test_length = len("\n".join(details_lines + policy_lines)) + len(
+                        issue_content
+                    )
+                    if test_length > available_length:
+                        truncated = True
+                        break
+                    policy_lines.append(issue_content)
+                    issues_shown += 1
+                    # Add separator between issues within same severity
+                    if i < len(validity_errors) - 1:
+                        policy_lines.append("---")
+                        policy_lines.append("")
+                policy_lines.append("")
+
+            if truncated:
+                break
+
+            # Add critical security findings
+            if critical_findings:
+                policy_lines.append("### 🟣 Critical")
+                policy_lines.append("")
+                for i, issue in enumerate(critical_findings):
+                    issue_content = self._format_issue_markdown(issue, result.policy_file)
+                    test_length = len("\n".join(details_lines + policy_lines)) + len(
+                        issue_content
+                    )
+                    if test_length > available_length:
+                        truncated = True
+                        break
+                    policy_lines.append(issue_content)
+                    issues_shown += 1
+                    # Add separator between issues within same severity
+                    if i < len(critical_findings) - 1:
+                        policy_lines.append("---")
+                        policy_lines.append("")
+                policy_lines.append("")
+
+            if truncated:
+                break
+
+            # Add high security findings
+            if high_findings:
+                policy_lines.append("### 🔶 High")
+                policy_lines.append("")
+                for i, issue in enumerate(high_findings):
                     issue_content = self._format_issue_markdown(issue, result.policy_file)
                     test_length = len("\n".join(details_lines + policy_lines)) + len(
                         issue_content
@@ -827,7 +905,7 @@ class ReportGenerator:
                     policy_lines.append(issue_content)
                     issues_shown += 1
                     # Add separator between issues within same severity
-                    if i < len(
+                    if i < len(high_findings) - 1:
                         policy_lines.append("---")
                         policy_lines.append("")
                     policy_lines.append("")
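
With this split, a report containing, say, one invalid-policy error, two critical findings, three high findings, and one warning renders a breakdown like the following instead of a single combined Errors row (the counts are made up for illustration; the row formats come from the f-strings above):

| Severity | Count |
|----------|------:|
| 🔴 **Errors** | 1 |
| 🟣 **Critical** | 2 |
| 🔶 **High** | 3 |
| 🟡 **Warnings** | 1 |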

iam_validator/integrations/github_integration.py
CHANGED
@@ -1265,6 +1265,26 @@ class GitHubIntegration:
                 f"{created_count} created, {deleted_count} deleted (resolved)"
             )
 
+            # Step 4: If no new comments were created but we need to submit APPROVE/REQUEST_CHANGES,
+            # submit a review without inline comments to update the PR review state.
+            # This is important when all issues are ignored/resolved - we need to dismiss
+            # the previous REQUEST_CHANGES review by submitting an APPROVE review.
+            if not new_comments_for_review and event in (
+                ReviewEvent.APPROVE,
+                ReviewEvent.REQUEST_CHANGES,
+            ):
+                # Only submit if there's a meaningful state change to make
+                # (submitting APPROVE when all issues are resolved/ignored)
+                logger.info(f"Submitting {event.value} review (no inline comments)")
+                success = await self.create_review_with_comments(
+                    comments=[],
+                    body=body,
+                    event=event,
+                )
+                if not success:
+                    logger.warning(f"Failed to submit {event.value} review")
+                    # Don't fail the whole operation - comments were managed successfully
+
             return True
 
     def _extract_finding_id(self, body: str) -> str | None:

{iam_policy_validator-1.14.4.dist-info → iam_policy_validator-1.14.6.dist-info}/WHEEL
RENAMED
File without changes

{iam_policy_validator-1.14.4.dist-info → iam_policy_validator-1.14.6.dist-info}/entry_points.txt
RENAMED
File without changes

{iam_policy_validator-1.14.4.dist-info → iam_policy_validator-1.14.6.dist-info}/licenses/LICENSE
RENAMED
File without changes