iam-policy-validator 1.6.0__py3-none-any.whl → 1.7.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- iam_policy_validator-1.7.1.dist-info/METADATA +429 -0
- {iam_policy_validator-1.6.0.dist-info → iam_policy_validator-1.7.1.dist-info}/RECORD +32 -31
- iam_validator/__version__.py +1 -1
- iam_validator/checks/action_condition_enforcement.py +3 -1
- iam_validator/checks/action_resource_matching.py +23 -6
- iam_validator/checks/full_wildcard.py +5 -1
- iam_validator/checks/policy_size.py +3 -7
- iam_validator/checks/policy_type_validation.py +9 -3
- iam_validator/checks/principal_validation.py +1 -1
- iam_validator/checks/resource_validation.py +54 -24
- iam_validator/checks/sensitive_action.py +5 -1
- iam_validator/checks/service_wildcard.py +3 -1
- iam_validator/checks/utils/sensitive_action_matcher.py +1 -2
- iam_validator/checks/utils/wildcard_expansion.py +1 -2
- iam_validator/checks/wildcard_action.py +7 -2
- iam_validator/checks/wildcard_resource.py +5 -1
- iam_validator/commands/analyze.py +98 -1
- iam_validator/commands/validate.py +4 -2
- iam_validator/core/access_analyzer.py +5 -0
- iam_validator/core/access_analyzer_report.py +2 -5
- iam_validator/core/aws_fetcher.py +14 -4
- iam_validator/core/config/config_loader.py +3 -6
- iam_validator/core/constants.py +74 -0
- iam_validator/core/models.py +29 -13
- iam_validator/core/pr_commenter.py +104 -18
- iam_validator/core/report.py +49 -36
- iam_validator/integrations/github_integration.py +21 -1
- iam_validator/sdk/arn_matching.py +108 -0
- iam_validator/utils/regex.py +7 -8
- iam_policy_validator-1.6.0.dist-info/METADATA +0 -1050
- {iam_policy_validator-1.6.0.dist-info → iam_policy_validator-1.7.1.dist-info}/WHEEL +0 -0
- {iam_policy_validator-1.6.0.dist-info → iam_policy_validator-1.7.1.dist-info}/entry_points.txt +0 -0
- {iam_policy_validator-1.6.0.dist-info → iam_policy_validator-1.7.1.dist-info}/licenses/LICENSE +0 -0
iam_validator/core/models.py
CHANGED

@@ -236,23 +236,39 @@ class ValidationIssue(BaseModel):
         # Main issue header with statement context
         parts.append(f"{emoji} **{self.severity.upper()}** in **{statement_context}**")
         parts.append("")
+
+        # Show message immediately (not collapsed)
         parts.append(self.message)
 
-        #
-
+        # Put additional details in collapsible section if there are any
+        has_details = bool(self.action or self.resource or self.condition_key or self.suggestion)
+
+        if has_details:
             parts.append("")
-            parts.append("
-
-
-
-
-            if
-
-
-
-
+            parts.append("<details>")
+            parts.append("<summary>📋 <b>View Details</b></summary>")
+            parts.append("")
+            parts.append("")  # Extra spacing after opening
+
+            # Add affected fields section if any are present
+            if self.action or self.resource or self.condition_key:
+                parts.append("**Affected Fields:**")
+                if self.action:
+                    parts.append(f" - Action: `{self.action}`")
+                if self.resource:
+                    parts.append(f" - Resource: `{self.resource}`")
+                if self.condition_key:
+                    parts.append(f" - Condition Key: `{self.condition_key}`")
+                parts.append("")
+
+            # Add suggestion if present
+            if self.suggestion:
+                parts.append("**💡 Suggested Fix:**")
+                parts.append("")
+                parts.append(self.suggestion)
+
             parts.append("")
-            parts.append(
+            parts.append("</details>")
 
         return "\n".join(parts)
 
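For context, the new `to_pr_comment` layout above moves secondary fields behind an HTML `<details>` block so GitHub renders them collapsed. A minimal, hypothetical sketch of the same pattern (`build_issue_comment` is illustrative only and not part of the package; field names merely mirror the diff):

```python
def build_issue_comment(
    severity: str,
    message: str,
    action: str | None = None,
    resource: str | None = None,
    suggestion: str | None = None,
) -> str:
    # Header and message stay visible; details collapse behind <details>.
    parts = [f"**{severity.upper()}**", "", message]
    if action or resource or suggestion:
        parts += ["", "<details>", "<summary>📋 <b>View Details</b></summary>", ""]
        if action:
            parts.append(f" - Action: `{action}`")
        if resource:
            parts.append(f" - Resource: `{resource}`")
        if suggestion:
            parts += ["", "**💡 Suggested Fix:**", "", suggestion]
        parts += ["", "</details>"]
    return "\n".join(parts)


print(build_issue_comment("error", "Action does not match any declared resource",
                          action="s3:GetObject", suggestion="Scope the Resource ARN."))
```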
iam_validator/core/pr_commenter.py
CHANGED

@@ -8,6 +8,11 @@ import json
 import logging
 from typing import Any
 
+from iam_validator.core.constants import (
+    BOT_IDENTIFIER,
+    REVIEW_IDENTIFIER,
+    SUMMARY_IDENTIFIER,
+)
 from iam_validator.core.models import ValidationIssue, ValidationReport
 from iam_validator.integrations.github_integration import GitHubIntegration, ReviewEvent
 
@@ -17,10 +22,10 @@ logger = logging.getLogger(__name__)
 class PRCommenter:
     """Posts validation findings as PR comments."""
 
-    #
-    BOT_IDENTIFIER =
-    SUMMARY_IDENTIFIER =
-    REVIEW_IDENTIFIER =
+    # Load identifiers from constants module for consistency
+    BOT_IDENTIFIER = BOT_IDENTIFIER
+    SUMMARY_IDENTIFIER = SUMMARY_IDENTIFIER
+    REVIEW_IDENTIFIER = REVIEW_IDENTIFIER
 
     def __init__(
         self,
@@ -60,7 +65,11 @@ class PRCommenter:
         self.github = GitHubIntegration()
 
         if not self.github.is_configured():
-            logger.error(
+            logger.error(
+                "GitHub integration not configured. "
+                "Required: GITHUB_TOKEN, GITHUB_REPOSITORY, and GITHUB_PR_NUMBER environment variables. "
+                "Ensure your workflow is triggered by a pull_request event."
+            )
             return False
 
         success = True
@@ -116,6 +125,14 @@ class PRCommenter:
             if not result.issues:
                 continue
 
+            # Convert absolute path to relative path for GitHub
+            relative_path = self._make_relative_path(result.policy_file)
+            if not relative_path:
+                logger.warning(
+                    f"Could not determine relative path for {result.policy_file}, skipping review comments"
+                )
+                continue
+
             # Try to determine line numbers from the policy file
             line_mapping = self._get_line_mapping(result.policy_file)
 
@@ -125,14 +142,21 @@ class PRCommenter:
 
                 if line_number:
                     comment = {
-                        "path":
+                        "path": relative_path,  # Use relative path for GitHub
                         "line": line_number,
                         "body": issue.to_pr_comment(),
                     }
 
-                    if
-                    comments_by_file[
-                    comments_by_file[
+                    if relative_path not in comments_by_file:
+                        comments_by_file[relative_path] = []
+                    comments_by_file[relative_path].append(comment)
+                    logger.debug(
+                        f"Prepared review comment for {relative_path}:{line_number} - {issue.issue_type}"
+                    )
+                else:
+                    logger.debug(
+                        f"Could not determine line number for issue in {relative_path}: {issue.issue_type}"
+                    )
 
         # If no line-specific comments, skip
         if not comments_by_file:
@@ -144,6 +168,14 @@ class PRCommenter:
         for file_comments in comments_by_file.values():
             all_comments.extend(file_comments)
 
+        logger.info(
+            f"Posting {len(all_comments)} review comments across {len(comments_by_file)} file(s)"
+        )
+
+        # Log files that will receive comments (for debugging)
+        for file_path, file_comments in comments_by_file.items():
+            logger.debug(f"  {file_path}: {len(file_comments)} comment(s)")
+
         # Determine review event based on fail_on_severities config
         # Check if any issue has a severity that should trigger REQUEST_CHANGES
        has_blocking_issues = any(
@@ -154,22 +186,76 @@ class PRCommenter:
 
         # Set review event: request changes if any blocking issues, else comment
         event = ReviewEvent.REQUEST_CHANGES if has_blocking_issues else ReviewEvent.COMMENT
+        logger.info(f"Creating PR review with event: {event.value}")
 
-        # Post review with comments (
-
-
-            f"🤖 **IAM Policy Validator**\n\n"
-            f"## Validation Results\n\n"
-            f"Found {report.total_issues} issues across {report.total_policies} policies.\n"
-            f"See inline comments for details."
-        )
+        # Post review with comments (use minimal body since summary comment has the details)
+        # Only include the identifier for cleanup purposes
+        review_body = f"{self.REVIEW_IDENTIFIER}"
 
-
+        success = await self.github.create_review_with_comments(
             comments=all_comments,
             body=review_body,
             event=event,
         )
 
+        if success:
+            logger.info(f"Successfully created PR review with {len(all_comments)} comments")
+        else:
+            logger.error("Failed to create PR review")
+
+        return success
+
+    def _make_relative_path(self, policy_file: str) -> str | None:
+        """Convert absolute path to relative path for GitHub.
+
+        GitHub PR review comments require paths relative to the repository root.
+
+        Args:
+            policy_file: Absolute or relative path to policy file
+
+        Returns:
+            Relative path from repository root, or None if cannot be determined
+        """
+        import os
+        from pathlib import Path
+
+        # If already relative, use as-is
+        if not os.path.isabs(policy_file):
+            return policy_file
+
+        # Try to get workspace path from environment
+        workspace = os.getenv("GITHUB_WORKSPACE")
+        if workspace:
+            try:
+                # Convert to Path objects for proper path handling
+                abs_file_path = Path(policy_file).resolve()
+                workspace_path = Path(workspace).resolve()
+
+                # Check if file is within workspace
+                if abs_file_path.is_relative_to(workspace_path):
+                    relative = abs_file_path.relative_to(workspace_path)
+                    # Use forward slashes for GitHub (works on all platforms)
+                    return str(relative).replace("\\", "/")
+            except (ValueError, OSError) as e:
+                logger.debug(f"Could not compute relative path for {policy_file}: {e}")
+
+        # Fallback: try current working directory
+        try:
+            cwd = Path.cwd()
+            abs_file_path = Path(policy_file).resolve()
+            if abs_file_path.is_relative_to(cwd):
+                relative = abs_file_path.relative_to(cwd)
+                return str(relative).replace("\\", "/")
+        except (ValueError, OSError) as e:
+            logger.debug(f"Could not compute relative path from CWD for {policy_file}: {e}")
+
+        # If all else fails, return None
+        logger.warning(
+            f"Could not determine relative path for {policy_file}. "
+            "Ensure GITHUB_WORKSPACE is set or file is in current directory."
+        )
+        return None
+
     def _get_line_mapping(self, policy_file: str) -> dict[int, int]:
         """Get mapping of statement indices to line numbers.
 
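The new `_make_relative_path` helper exists because GitHub review comments require paths relative to the repository root. A standalone sketch of the same conversion, assuming `GITHUB_WORKSPACE` points at the checkout root as it does on GitHub-hosted runners (`to_repo_relative` is illustrative, not part of the package):

```python
import os
from pathlib import Path


def to_repo_relative(path: str) -> str | None:
    """Return a repo-relative, forward-slash path, or None if it cannot be derived."""
    if not os.path.isabs(path):
        return path  # already relative, use as-is
    workspace = os.getenv("GITHUB_WORKSPACE")
    if workspace:
        file_path = Path(path).resolve()
        root = Path(workspace).resolve()
        if file_path.is_relative_to(root):  # Python 3.9+
            return str(file_path.relative_to(root)).replace("\\", "/")
    return None


# e.g. "/home/runner/work/repo/repo/policies/s3.json" -> "policies/s3.json"
```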
iam_validator/core/report.py
CHANGED

@@ -126,7 +126,8 @@ class ReportGenerator:
         # Show policies with security findings (separate from validity)
         if report.policies_with_security_issues > 0:
             summary_text.append(
-                f"Security Findings: {report.policies_with_security_issues} ",
+                f"Security Findings: {report.policies_with_security_issues} ",
+                style="yellow",
             )
 
             summary_text.append("\n")
@@ -418,8 +419,7 @@ class ReportGenerator:
             1 for r in report.results for i in r.issues if i.severity in ("info", "low")
         )
 
-        lines.append("
-        lines.append("<summary><b>🔍 Issue Breakdown</b></summary>")
+        lines.append("### 🔍 Issue Breakdown")
         lines.append("")
         lines.append("| Severity | Count |")
         lines.append("|----------|------:|")
@@ -430,8 +430,6 @@ class ReportGenerator:
         if infos > 0:
             lines.append(f"| 🔵 **Info** | {infos} |")
         lines.append("")
-        lines.append("</details>")
-        lines.append("")
 
         return lines
 
@@ -442,11 +440,7 @@ class ReportGenerator:
                 "---",
                 "",
                 "<div align='center'>",
-                "",
-                "**🤖 Generated by IAM Policy Validator**",
-                "",
-                "_Powered by AWS IAM Access Analyzer and custom policy checks_",
-                "",
+                "🤖 <em>Generated by <strong>IAM Policy Validator</strong></em><br>",
                 "</div>",
             ]
         )
@@ -455,9 +449,9 @@ class ReportGenerator:
         """Format a single policy's issues for the comment."""
         lines = []
 
-        lines.append("<details
+        lines.append("<details>")
         lines.append(
-            f"<summary
+            f"<summary>📋 <b>{idx}. <code>{result.policy_file}</code></b> - {len(result.issues)} issue(s) found</summary>"
         )
         lines.append("")
 
@@ -470,21 +464,21 @@ class ReportGenerator:
             lines.append("### 🔴 Errors")
             lines.append("")
             for issue in errors:
-                lines.append(self._format_issue_markdown(issue))
+                lines.append(self._format_issue_markdown(issue, result.policy_file))
                 lines.append("")
 
         if warnings:
             lines.append("### 🟡 Warnings")
             lines.append("")
             for issue in warnings:
-                lines.append(self._format_issue_markdown(issue))
+                lines.append(self._format_issue_markdown(issue, result.policy_file))
                 lines.append("")
 
         if infos:
             lines.append("### 🔵 Info")
             lines.append("")
             for issue in infos:
-                lines.append(self._format_issue_markdown(issue))
+                lines.append(self._format_issue_markdown(issue, result.policy_file))
                 lines.append("")
 
         lines.append("</details>")
@@ -573,8 +567,7 @@ class ReportGenerator:
             1 for r in report.results for i in r.issues if i.severity in ("info", "low")
         )
 
-        lines.append("
-        lines.append("<summary><b>🔍 Issue Breakdown</b></summary>")
+        lines.append("### 🔍 Issue Breakdown")
         lines.append("")
         lines.append("| Severity | Count |")
         lines.append("|----------|------:|")
@@ -585,22 +578,17 @@ class ReportGenerator:
         if infos > 0:
             lines.append(f"| 🔵 **Info** | {infos} |")
         lines.append("")
-        lines.append("</details>")
-        lines.append("")
 
         # Store header for later (we always include this)
         header_content = "\n".join(lines)
 
         # Footer (we always include this)
         footer_lines = [
+            "",
            "---",
            "",
            "<div align='center'>",
-            "",
-            "**🤖 Generated by IAM Policy Validator**",
-            "",
-            "_Powered by AWS IAM Access Analyzer and custom policy checks_",
-            "",
+            "🤖 <em>Generated by <strong>IAM Policy Validator</strong></em><br>",
            "</div>",
        ]
        footer_content = "\n".join(footer_lines)
@@ -650,8 +638,8 @@ class ReportGenerator:
            severity_summary = " · ".join(severity_parts)
 
            # Only open first 3 policy details by default to avoid wall of text
-            is_open = " open" if policies_shown < 3 else ""
-            policy_lines.append(
+            # is_open = " open" if policies_shown < 3 else ""
+            policy_lines.append("<details>")
            policy_lines.append(
                f"<summary><b>{idx}. <code>{result.policy_file}</code></b> - {severity_summary}</summary>"
            )
@@ -662,7 +650,7 @@ class ReportGenerator:
                policy_lines.append("### 🔴 Errors")
                policy_lines.append("")
                for i, issue in enumerate(errors):
-                    issue_content = self._format_issue_markdown(issue)
+                    issue_content = self._format_issue_markdown(issue, result.policy_file)
                    test_length = len("\n".join(details_lines + policy_lines)) + len(
                        issue_content
                    )
@@ -685,7 +673,7 @@ class ReportGenerator:
                policy_lines.append("### 🟡 Warnings")
                policy_lines.append("")
                for i, issue in enumerate(warnings):
-                    issue_content = self._format_issue_markdown(issue)
+                    issue_content = self._format_issue_markdown(issue, result.policy_file)
                    test_length = len("\n".join(details_lines + policy_lines)) + len(
                        issue_content
                    )
@@ -708,7 +696,7 @@ class ReportGenerator:
                policy_lines.append("### 🔵 Info")
                policy_lines.append("")
                for i, issue in enumerate(infos):
-                    issue_content = self._format_issue_markdown(issue)
+                    issue_content = self._format_issue_markdown(issue, result.policy_file)
                    test_length = len("\n".join(details_lines + policy_lines)) + len(
                        issue_content
                    )
@@ -728,8 +716,6 @@ class ReportGenerator:
 
            policy_lines.append("</details>")
            policy_lines.append("")
-            policy_lines.append("---")
-            policy_lines.append("")
 
            # Check if adding this policy would exceed limit
            test_length = len("\n".join(details_lines + policy_lines))
@@ -740,6 +726,15 @@ class ReportGenerator:
                details_lines.extend(policy_lines)
                policies_shown += 1
 
+                # Add separator between policies (but not after the last one)
+                # The footer will add its own separator
+                if (
+                    policies_shown < len([r for r in sorted_results if r[1].issues])
+                    and not truncated
+                ):
+                    details_lines.append("---")
+                    details_lines.append("")
+
        # Add truncation warning if needed
        if truncated:
            remaining_policies = len([r for r in report.results if r.issues]) - policies_shown
@@ -776,13 +771,31 @@ class ReportGenerator:
 
        return "\n".join(lines)
 
-    def _format_issue_markdown(self, issue: ValidationIssue) -> str:
-        """Format a single issue as markdown.
+    def _format_issue_markdown(self, issue: ValidationIssue, policy_file: str | None = None) -> str:
+        """Format a single issue as markdown.
+
+        Args:
+            issue: The validation issue to format
+            policy_file: Optional policy file path (currently unused, kept for compatibility)
+        """
        # Use 1-indexed statement numbers for user-facing output
        statement_num = issue.statement_index + 1
-
-
-
+
+        # Build statement location reference
+        # Note: We show plain text here instead of links because:
+        # 1. GitHub's diff anchor format only works for files in the PR diff
+        # 2. Inline review comments (posted separately) already provide perfect navigation
+        # 3. Summary comment is for overview, not detailed navigation
+        if issue.line_number:
+            location = f"Statement {statement_num} (Line {issue.line_number})"
+            if issue.statement_sid:
+                location = (
+                    f"`{issue.statement_sid}` (statement {statement_num}, line {issue.line_number})"
+                )
+        else:
+            location = f"Statement {statement_num}"
+            if issue.statement_sid:
+                location = f"`{issue.statement_sid}` (statement {statement_num})"
 
        parts = []
 
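The report changes above drop the collapsed `<details>` wrapper around the severity breakdown in favor of a plain heading plus table. A rough, hypothetical sketch of the Markdown this produces (the Error/Warning row labels are assumed to mirror the Info row shown in the diff):

```python
def issue_breakdown(errors: int, warnings: int, infos: int) -> str:
    lines = [
        "### 🔍 Issue Breakdown",
        "",
        "| Severity | Count |",
        "|----------|------:|",
    ]
    if errors:
        lines.append(f"| 🔴 **Error** | {errors} |")      # assumed label
    if warnings:
        lines.append(f"| 🟡 **Warning** | {warnings} |")   # assumed label
    if infos:
        lines.append(f"| 🔵 **Info** | {infos} |")         # as shown in the diff
    return "\n".join(lines)


print(issue_breakdown(errors=2, warnings=1, infos=0))
```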
iam_validator/integrations/github_integration.py
CHANGED

@@ -238,7 +238,27 @@ class GitHubIntegration:
         Returns:
             True if all required environment variables are set
         """
-
+        is_valid = all([self.token, self.repository, self.pr_number])
+
+        # Provide helpful debug info when not configured
+        if not is_valid:
+            missing = []
+            if not self.token:
+                missing.append("GITHUB_TOKEN")
+            if not self.repository:
+                missing.append("GITHUB_REPOSITORY")
+            if not self.pr_number:
+                missing.append("GITHUB_PR_NUMBER")
+
+            logger.debug(f"GitHub integration missing: {', '.join(missing)}")
+            if not self.pr_number and self.token and self.repository:
+                logger.info(
+                    "GitHub PR integration requires GITHUB_PR_NUMBER. "
+                    "This is only available when running on pull request events. "
+                    "Current event may not have PR context."
+                )
+
+        return is_valid
 
     async def _make_request(
         self, method: str, endpoint: str, **kwargs: Any
iam_validator/sdk/arn_matching.py
CHANGED

@@ -245,6 +245,114 @@ def _strip_variables_from_arn(arn: str, replace_with: str = "") -> str:
     return re.sub(r"\$\{aws[\.:][\w\/]+\}", replace_with, arn)
 
 
+def normalize_template_variables(arn: str) -> str:
+    """
+    Normalize template variables in ARN to valid placeholders for validation.
+
+    This function is POSITION-AWARE and handles ANY variable name by determining
+    the appropriate replacement based on where the variable appears in the ARN structure.
+    It correctly handles variables with colons inside them (e.g., ${AWS::AccountId}).
+
+    Supports template variables from:
+    - Terraform/Terragrunt: ${var.name}, ${local.value}, ${data.source.attr}, etc.
+    - CloudFormation: ${AWS::AccountId}, ${AWS::Region}, ${MyParameter}, etc.
+    - AWS policy variables: ${aws:username}, ${aws:PrincipalTag/tag-key}, etc.
+
+    Args:
+        arn: ARN string that may contain template variables
+
+    Returns:
+        ARN with template variables replaced with valid placeholders based on position
+
+    Examples:
+        >>> normalize_template_variables("arn:aws:iam::${my_account}:role/name")
+        'arn:aws:iam::123456789012:role/name'
+
+        >>> normalize_template_variables("arn:aws:iam::${AWS::AccountId}:role/name")
+        'arn:aws:iam::123456789012:role/name'
+
+        >>> normalize_template_variables("arn:${var.partition}:s3:::${var.bucket}/*")
+        'arn:aws:s3:::placeholder/*'
+    """
+    # Strategy: Use a simpler, more robust approach
+    # First protect template variables by temporarily replacing them with markers,
+    # then split the ARN, then replace based on position
+
+    # Step 1: Find all template variables and temporarily replace them with position markers
+    # This handles variables with colons inside them (like ${AWS::AccountId})
+    variables = []
+
+    def save_variable(match):
+        variables.append(match.group(0))
+        return f"__VAR{len(variables) - 1}__"
+
+    # Save all template variables (including those with colons, dots, slashes, etc.)
+    temp_arn = re.sub(r"\$\{[^}]+\}", save_variable, arn)
+
+    # Step 2: Now we can safely split by colons
+    parts = temp_arn.split(":", 5)
+
+    if len(parts) < 6:
+        # Not a valid ARN format, restore variables with generic placeholder
+        result = arn
+        for var in variables:
+            if re.match(r"\$\{aws[\.:]", var, re.IGNORECASE):
+                result = result.replace(var, "placeholder", 1)
+            else:
+                result = result.replace(var, "placeholder", 1)
+        return result
+
+    # Step 3: Restore variables based on their position in the ARN
+    # ARN format: arn:partition:service:region:account:resource
+    replacements = {
+        1: "aws",  # partition
+        2: "s3",  # service (generic placeholder)
+        3: "us-east-1",  # region
+        4: "123456789012",  # account
+        5: "placeholder",  # resource
+    }
+
+    for i, part in enumerate(parts):
+        if "__VAR" in part:
+            # Find all variable markers in this part
+            for j, var in enumerate(variables):
+                marker = f"__VAR{j}__"
+                if marker in part:
+                    # Determine replacement based on position
+                    if i in replacements:
+                        parts[i] = parts[i].replace(marker, replacements[i])
+                    else:
+                        parts[i] = parts[i].replace(marker, "placeholder")
+
+    # Reconstruct ARN
+    return ":".join(parts)
+
+
+def has_template_variables(arn: str) -> bool:
+    """
+    Check if an ARN contains template variables.
+
+    Detects template variables from:
+    - Terraform/Terragrunt: ${var_name}
+    - CloudFormation: ${AWS::AccountId}
+    - AWS policy variables: ${aws:username}
+
+    Args:
+        arn: ARN string to check
+
+    Returns:
+        True if ARN contains template variables, False otherwise
+
+    Examples:
+        >>> has_template_variables("arn:aws:iam::${aws_account_id}:role/name")
+        True
+
+        >>> has_template_variables("arn:aws:iam::123456789012:role/name")
+        False
+    """
+    return bool(re.search(r"\$\{[\w\-\.\_:\/]+\}", arn))
+
+
 def convert_aws_pattern_to_wildcard(pattern: str) -> str:
     """
     Convert AWS ARN pattern format to wildcard pattern for matching.
iam_validator/utils/regex.py
CHANGED

@@ -13,13 +13,12 @@ Performance benefits:
 import re
 from collections.abc import Callable
 from functools import wraps
-from re import Pattern
 
 
 def cached_pattern(
     flags: int = 0,
     maxsize: int = 128,
-) -> Callable[[Callable[[], str]], Callable[[], Pattern]]:
+) -> Callable[[Callable[[], str]], Callable[[], re.Pattern]]:
     r"""Decorator that caches compiled regex patterns.
 
     This decorator transforms a function that returns a regex pattern string
@@ -60,12 +59,12 @@ def cached_pattern(
     Cached calls: ~0.1-0.5μs (cache lookup) → 20-100x faster
     """
 
-    def decorator(func: Callable[[], str]) -> Callable[[], Pattern]:
+    def decorator(func: Callable[[], str]) -> Callable[[], re.Pattern]:
         # Use a cache per function to avoid key collisions
         cache = {}
 
         @wraps(func)
-        def wrapper() -> Pattern:
+        def wrapper() -> re.Pattern:
             # Use function name as cache key (since each decorated function
             # returns the same pattern string)
             cache_key = func.__name__
@@ -84,7 +83,7 @@ def cached_pattern(
     return decorator
 
 
-def compile_and_cache(pattern: str, flags: int = 0, maxsize: int = 512) -> Pattern:
+def compile_and_cache(pattern: str, flags: int = 0, maxsize: int = 512) -> re.Pattern:
     """Compile a regex pattern with automatic caching.
 
     This is a functional interface (not a decorator) that compiles and caches
@@ -116,17 +115,17 @@ def compile_and_cache(pattern: str, flags: int = 0, maxsize: int = 512) -> Patte
     from functools import lru_cache
 
     @lru_cache(maxsize=maxsize)
-    def _compile(pattern_str: str, flags: int) -> Pattern:
+    def _compile(pattern_str: str, flags: int) -> re.Pattern:
         return re.compile(pattern_str, flags)
 
     return _compile(pattern, flags)
 
 
 # Singleton instance for shared pattern compilation
-_pattern_cache: dict[tuple[str, int], Pattern] = {}
+_pattern_cache: dict[tuple[str, int], re.Pattern] = {}
 
 
-def get_cached_pattern(pattern: str, flags: int = 0) -> Pattern:
+def get_cached_pattern(pattern: str, flags: int = 0) -> re.Pattern:
     """Get a compiled pattern from the shared cache.
 
     This provides a simple, stateless way to get cached patterns without
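The regex utility change is purely a typing cleanup (`re.Pattern` instead of importing `Pattern` from `re`); behavior is unchanged. A hedged usage sketch of the two cached-pattern entry points, based on the signatures and docstrings above (the decorated function takes no arguments and returns a pattern string):

```python
import re

from iam_validator.utils.regex import cached_pattern, get_cached_pattern


@cached_pattern(flags=re.IGNORECASE)
def arn_pattern() -> str:
    # Returns the pattern string once; the decorator compiles and caches it.
    return r"^arn:[^:]*:[^:]*:[^:]*:[^:]*:.+$"


compiled = arn_pattern()  # re.Pattern, compiled on first call, cached afterwards
assert compiled.match("arn:aws:s3:::my-bucket/key")

# Functional interface backed by a shared cache keyed on (pattern, flags).
var_pattern = get_cached_pattern(r"\$\{[^}]+\}")
assert var_pattern.search("arn:aws:iam::${AWS::AccountId}:role/name")
```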
|