iam-policy-validator 1.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of iam-policy-validator might be problematic; see the registry's advisory for more details.
- iam_policy_validator-1.7.0.dist-info/METADATA +1057 -0
- iam_policy_validator-1.7.0.dist-info/RECORD +83 -0
- iam_policy_validator-1.7.0.dist-info/WHEEL +4 -0
- iam_policy_validator-1.7.0.dist-info/entry_points.txt +2 -0
- iam_policy_validator-1.7.0.dist-info/licenses/LICENSE +21 -0
- iam_validator/__init__.py +27 -0
- iam_validator/__main__.py +11 -0
- iam_validator/__version__.py +7 -0
- iam_validator/checks/__init__.py +43 -0
- iam_validator/checks/action_condition_enforcement.py +884 -0
- iam_validator/checks/action_resource_matching.py +441 -0
- iam_validator/checks/action_validation.py +72 -0
- iam_validator/checks/condition_key_validation.py +92 -0
- iam_validator/checks/condition_type_mismatch.py +259 -0
- iam_validator/checks/full_wildcard.py +71 -0
- iam_validator/checks/mfa_condition_check.py +112 -0
- iam_validator/checks/policy_size.py +147 -0
- iam_validator/checks/policy_type_validation.py +305 -0
- iam_validator/checks/principal_validation.py +776 -0
- iam_validator/checks/resource_validation.py +138 -0
- iam_validator/checks/sensitive_action.py +254 -0
- iam_validator/checks/service_wildcard.py +107 -0
- iam_validator/checks/set_operator_validation.py +157 -0
- iam_validator/checks/sid_uniqueness.py +170 -0
- iam_validator/checks/utils/__init__.py +1 -0
- iam_validator/checks/utils/policy_level_checks.py +143 -0
- iam_validator/checks/utils/sensitive_action_matcher.py +294 -0
- iam_validator/checks/utils/wildcard_expansion.py +87 -0
- iam_validator/checks/wildcard_action.py +67 -0
- iam_validator/checks/wildcard_resource.py +135 -0
- iam_validator/commands/__init__.py +25 -0
- iam_validator/commands/analyze.py +531 -0
- iam_validator/commands/base.py +48 -0
- iam_validator/commands/cache.py +392 -0
- iam_validator/commands/download_services.py +255 -0
- iam_validator/commands/post_to_pr.py +86 -0
- iam_validator/commands/validate.py +600 -0
- iam_validator/core/__init__.py +14 -0
- iam_validator/core/access_analyzer.py +671 -0
- iam_validator/core/access_analyzer_report.py +640 -0
- iam_validator/core/aws_fetcher.py +940 -0
- iam_validator/core/check_registry.py +607 -0
- iam_validator/core/cli.py +134 -0
- iam_validator/core/condition_validators.py +626 -0
- iam_validator/core/config/__init__.py +81 -0
- iam_validator/core/config/aws_api.py +35 -0
- iam_validator/core/config/aws_global_conditions.py +160 -0
- iam_validator/core/config/category_suggestions.py +104 -0
- iam_validator/core/config/condition_requirements.py +155 -0
- iam_validator/core/config/config_loader.py +472 -0
- iam_validator/core/config/defaults.py +523 -0
- iam_validator/core/config/principal_requirements.py +421 -0
- iam_validator/core/config/sensitive_actions.py +672 -0
- iam_validator/core/config/service_principals.py +95 -0
- iam_validator/core/config/wildcards.py +124 -0
- iam_validator/core/constants.py +74 -0
- iam_validator/core/formatters/__init__.py +27 -0
- iam_validator/core/formatters/base.py +147 -0
- iam_validator/core/formatters/console.py +59 -0
- iam_validator/core/formatters/csv.py +170 -0
- iam_validator/core/formatters/enhanced.py +440 -0
- iam_validator/core/formatters/html.py +672 -0
- iam_validator/core/formatters/json.py +33 -0
- iam_validator/core/formatters/markdown.py +63 -0
- iam_validator/core/formatters/sarif.py +251 -0
- iam_validator/core/models.py +327 -0
- iam_validator/core/policy_checks.py +656 -0
- iam_validator/core/policy_loader.py +396 -0
- iam_validator/core/pr_commenter.py +424 -0
- iam_validator/core/report.py +872 -0
- iam_validator/integrations/__init__.py +28 -0
- iam_validator/integrations/github_integration.py +815 -0
- iam_validator/integrations/ms_teams.py +442 -0
- iam_validator/sdk/__init__.py +187 -0
- iam_validator/sdk/arn_matching.py +382 -0
- iam_validator/sdk/context.py +222 -0
- iam_validator/sdk/exceptions.py +48 -0
- iam_validator/sdk/helpers.py +177 -0
- iam_validator/sdk/policy_utils.py +425 -0
- iam_validator/sdk/shortcuts.py +283 -0
- iam_validator/utils/__init__.py +31 -0
- iam_validator/utils/cache.py +105 -0
- iam_validator/utils/regex.py +206 -0
|
@@ -0,0 +1,424 @@
|
|
|
1
|
+
"""PR Comment Module.
|
|
2
|
+
|
|
3
|
+
This module handles posting validation findings as PR comments.
|
|
4
|
+
It reads a JSON report and posts line-specific comments to GitHub PRs.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import json
|
|
8
|
+
import logging
|
|
9
|
+
from typing import Any
|
|
10
|
+
|
|
11
|
+
from iam_validator.core.constants import (
|
|
12
|
+
BOT_IDENTIFIER,
|
|
13
|
+
REVIEW_IDENTIFIER,
|
|
14
|
+
SUMMARY_IDENTIFIER,
|
|
15
|
+
)
|
|
16
|
+
from iam_validator.core.models import ValidationIssue, ValidationReport
|
|
17
|
+
from iam_validator.integrations.github_integration import GitHubIntegration, ReviewEvent
|
|
18
|
+
|
|
19
|
+
logger = logging.getLogger(__name__)
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class PRCommenter:
    """Posts validation findings as PR comments.

    Produces two kinds of output on a pull request:

    * a summary comment (split into multiple parts when it exceeds GitHub's
      comment size limit), and
    * a PR review with line-specific comments anchored to the policy files.
    """

    # Load identifiers from constants module for consistency; exposed as class
    # attributes so previously posted bot content can be located for cleanup.
    BOT_IDENTIFIER = BOT_IDENTIFIER
    SUMMARY_IDENTIFIER = SUMMARY_IDENTIFIER
    REVIEW_IDENTIFIER = REVIEW_IDENTIFIER

    def __init__(
        self,
        github: GitHubIntegration | None = None,
        cleanup_old_comments: bool = True,
        fail_on_severities: list[str] | None = None,
    ):
        """Initialize PR commenter.

        Args:
            github: GitHubIntegration instance (will create one if None)
            cleanup_old_comments: Whether to clean up old bot comments before
                posting new ones
            fail_on_severities: List of severity levels that should trigger
                REQUEST_CHANGES (e.g., ["error", "critical", "high"])
        """
        self.github = github
        self.cleanup_old_comments = cleanup_old_comments
        # Default: only errors/criticals block the PR.
        self.fail_on_severities = fail_on_severities or ["error", "critical"]

    async def post_findings_to_pr(
        self,
        report: ValidationReport,
        create_review: bool = True,
        add_summary_comment: bool = True,
    ) -> bool:
        """Post validation findings to a PR.

        Args:
            report: Validation report with findings
            create_review: Whether to create a PR review with line comments
            add_summary_comment: Whether to add a summary comment

        Returns:
            True if successful, False otherwise
        """
        if self.github is None:
            self.github = GitHubIntegration()

        if not self.github.is_configured():
            logger.error(
                "GitHub integration not configured. "
                "Required: GITHUB_TOKEN, GITHUB_REPOSITORY, and GITHUB_PR_NUMBER environment variables. "
                "Ensure your workflow is triggered by a pull_request event."
            )
            return False

        success = True

        # Clean up old bot comments if enabled; only relevant when a fresh
        # review is about to be created.
        if self.cleanup_old_comments and create_review:
            logger.info("Cleaning up old review comments from previous runs...")
            await self.github.cleanup_bot_review_comments(self.REVIEW_IDENTIFIER)

        # Post summary comment (potentially as multiple parts)
        if add_summary_comment:
            # NOTE(review): imported here rather than at module top —
            # presumably to avoid a circular import with the report module.
            from iam_validator.core.report import ReportGenerator

            generator = ReportGenerator()
            comment_parts = generator.generate_github_comment_parts(report)

            # Post all parts using the multipart method
            if not await self.github.post_multipart_comments(
                comment_parts, self.SUMMARY_IDENTIFIER
            ):
                logger.error("Failed to post summary comment(s)")
                success = False
            else:
                if len(comment_parts) > 1:
                    logger.info(f"Posted summary in {len(comment_parts)} parts")
                else:
                    logger.info("Posted summary comment")

        # Post line-specific review comments
        if create_review:
            if not await self._post_review_comments(report):
                logger.error("Failed to post review comments")
                success = False

        return success

    async def _post_review_comments(self, report: ValidationReport) -> bool:
        """Post line-specific review comments.

        Args:
            report: Validation report

        Returns:
            True if successful, False otherwise
        """
        if not self.github:
            return False

        # Group issues by file so per-file counts can be logged.
        comments_by_file: dict[str, list[dict[str, Any]]] = {}

        for result in report.results:
            if not result.issues:
                continue

            # Convert absolute path to relative path for GitHub
            relative_path = self._make_relative_path(result.policy_file)
            if not relative_path:
                logger.warning(
                    f"Could not determine relative path for {result.policy_file}, skipping review comments"
                )
                continue

            # Try to determine line numbers from the policy file
            line_mapping = self._get_line_mapping(result.policy_file)

            for issue in result.issues:
                # Determine the line number for this issue
                line_number = self._find_issue_line(issue, result.policy_file, line_mapping)

                if line_number:
                    comment = {
                        "path": relative_path,  # Use relative path for GitHub
                        "line": line_number,
                        "body": issue.to_pr_comment(),
                    }

                    comments_by_file.setdefault(relative_path, []).append(comment)
                    logger.debug(
                        f"Prepared review comment for {relative_path}:{line_number} - {issue.issue_type}"
                    )
                else:
                    logger.debug(
                        f"Could not determine line number for issue in {relative_path}: {issue.issue_type}"
                    )

        # If no line-specific comments, skip
        if not comments_by_file:
            logger.info("No line-specific comments to post")
            return True

        # Flatten comments list
        all_comments = []
        for file_comments in comments_by_file.values():
            all_comments.extend(file_comments)

        logger.info(
            f"Posting {len(all_comments)} review comments across {len(comments_by_file)} file(s)"
        )

        # Log files that will receive comments (for debugging)
        for file_path, file_comments in comments_by_file.items():
            logger.debug(f"  {file_path}: {len(file_comments)} comment(s)")

        # Determine review event based on fail_on_severities config:
        # any issue at a blocking severity escalates to REQUEST_CHANGES.
        has_blocking_issues = any(
            issue.severity in self.fail_on_severities
            for result in report.results
            for issue in result.issues
        )

        # Set review event: request changes if any blocking issues, else comment
        event = ReviewEvent.REQUEST_CHANGES if has_blocking_issues else ReviewEvent.COMMENT
        logger.info(f"Creating PR review with event: {event.value}")

        # Post review with comments (use minimal body since summary comment has
        # the details). Only include the identifier for cleanup purposes.
        review_body = f"{self.REVIEW_IDENTIFIER}"

        success = await self.github.create_review_with_comments(
            comments=all_comments,
            body=review_body,
            event=event,
        )

        if success:
            logger.info(f"Successfully created PR review with {len(all_comments)} comments")
        else:
            logger.error("Failed to create PR review")

        return success

    def _make_relative_path(self, policy_file: str) -> str | None:
        """Convert absolute path to relative path for GitHub.

        GitHub PR review comments require paths relative to the repository
        root.  Tries GITHUB_WORKSPACE first, then the current working
        directory.

        Args:
            policy_file: Absolute or relative path to policy file

        Returns:
            Relative path from repository root, or None if it cannot be
            determined
        """
        import os
        from pathlib import Path

        # If already relative, use as-is
        if not os.path.isabs(policy_file):
            return policy_file

        # Try to get workspace path from environment
        workspace = os.getenv("GITHUB_WORKSPACE")
        if workspace:
            try:
                # Convert to Path objects for proper path handling
                abs_file_path = Path(policy_file).resolve()
                workspace_path = Path(workspace).resolve()

                # Check if file is within workspace
                if abs_file_path.is_relative_to(workspace_path):
                    relative = abs_file_path.relative_to(workspace_path)
                    # Use forward slashes for GitHub (works on all platforms)
                    return str(relative).replace("\\", "/")
            except (ValueError, OSError) as e:
                logger.debug(f"Could not compute relative path for {policy_file}: {e}")

        # Fallback: try current working directory
        try:
            cwd = Path.cwd()
            abs_file_path = Path(policy_file).resolve()
            if abs_file_path.is_relative_to(cwd):
                relative = abs_file_path.relative_to(cwd)
                return str(relative).replace("\\", "/")
        except (ValueError, OSError) as e:
            logger.debug(f"Could not compute relative path from CWD for {policy_file}: {e}")

        # If all else fails, return None
        logger.warning(
            f"Could not determine relative path for {policy_file}. "
            "Ensure GITHUB_WORKSPACE is set or file is in current directory."
        )
        return None

    def _get_line_mapping(self, policy_file: str) -> dict[int, int]:
        """Get mapping of statement indices to line numbers.

        Line-based heuristic: after the ``"Statement"`` key is seen, every
        line that begins with ``{`` is taken to open a new statement object.
        Assumes conventionally formatted JSON where each statement starts on
        its own line — TODO confirm against the policy loader's formatting.

        Args:
            policy_file: Path to policy file

        Returns:
            Dict mapping statement index to line number (empty on parse
            failure)
        """
        try:
            with open(policy_file, encoding="utf-8") as f:
                lines = f.readlines()

            mapping: dict[int, int] = {}
            statement_count = 0
            in_statement_array = False

            for line_num, line in enumerate(lines, start=1):
                stripped = line.strip()

                # Detect "Statement": [ or "Statement" : [
                if '"Statement"' in stripped or "'Statement'" in stripped:
                    in_statement_array = True
                    continue

                # Detect statement object start
                if in_statement_array and stripped.startswith("{"):
                    mapping[statement_count] = line_num
                    statement_count += 1

            return mapping

        except Exception as e:
            logger.warning(f"Could not parse {policy_file} for line mapping: {e}")
            return {}

    def _find_issue_line(
        self,
        issue: ValidationIssue,
        policy_file: str,
        line_mapping: dict[int, int],
    ) -> int | None:
        """Find the line number for an issue.

        Resolution order: explicit line number on the issue, then the
        statement-index mapping, then a textual search for the issue's
        action/resource/condition key.

        Args:
            issue: Validation issue
            policy_file: Path to policy file
            line_mapping: Statement index to line number mapping

        Returns:
            Line number or None
        """
        # If issue has explicit line number, use it
        if issue.line_number:
            return issue.line_number

        # Otherwise, use statement mapping
        if issue.statement_index in line_mapping:
            return line_mapping[issue.statement_index]

        # Fallback: try to find specific field in file
        search_term = issue.action or issue.resource or issue.condition_key
        if search_term:
            return self._search_for_field_line(policy_file, issue.statement_index, search_term)

        return None

    def _search_for_field_line(
        self, policy_file: str, statement_idx: int, search_term: str
    ) -> int | None:
        """Search for a specific field within a statement.

        Uses the same statement-indexing heuristic as ``_get_line_mapping``:
        statement objects are lines beginning with ``{`` after the
        ``"Statement"`` key.  The target statement ends when the brace depth
        drops back below the depth at which it was opened, so the search
        never leaks into later statements.

        Args:
            policy_file: Path to policy file
            statement_idx: Statement index
            search_term: Term to search for

        Returns:
            Line number or None
        """
        try:
            with open(policy_file, encoding="utf-8") as f:
                lines = f.readlines()

            statement_count = 0
            in_statement_array = False
            target_depth: int | None = None  # depth inside the target statement
            brace_depth = 0

            for line_num, line in enumerate(lines, start=1):
                stripped = line.strip()

                # Detect "Statement": [ — statement objects follow this key.
                if not in_statement_array and (
                    '"Statement"' in stripped or "'Statement'" in stripped
                ):
                    in_statement_array = True
                    brace_depth += stripped.count("{") - stripped.count("}")
                    continue

                # Detect statement object start (only inside the Statement
                # array, so the policy's outer "{" is not miscounted).
                if in_statement_array and target_depth is None and stripped.startswith("{"):
                    if statement_count == statement_idx:
                        # Remember how deep this statement's body sits so we
                        # can tell when it closes.
                        target_depth = brace_depth + 1
                    statement_count += 1

                # Track braces
                brace_depth += stripped.count("{") - stripped.count("}")

                if target_depth is not None:
                    # Search within the target statement
                    if search_term in line:
                        return line_num
                    # Statement closed without a match: stop searching.
                    if brace_depth < target_depth:
                        return None

            return None

        except Exception as e:
            logger.debug(f"Could not search {policy_file}: {e}")
            return None
|
|
375
|
+
|
|
376
|
+
|
|
377
|
+
async def post_report_to_pr(
|
|
378
|
+
report_file: str,
|
|
379
|
+
create_review: bool = True,
|
|
380
|
+
add_summary: bool = True,
|
|
381
|
+
config_path: str | None = None,
|
|
382
|
+
) -> bool:
|
|
383
|
+
"""Post a JSON report to a PR.
|
|
384
|
+
|
|
385
|
+
Args:
|
|
386
|
+
report_file: Path to JSON report file
|
|
387
|
+
create_review: Whether to create line-specific review
|
|
388
|
+
add_summary: Whether to add summary comment
|
|
389
|
+
config_path: Optional path to config file (to get fail_on_severity)
|
|
390
|
+
|
|
391
|
+
Returns:
|
|
392
|
+
True if successful, False otherwise
|
|
393
|
+
"""
|
|
394
|
+
try:
|
|
395
|
+
# Load report from JSON
|
|
396
|
+
with open(report_file, encoding="utf-8") as f:
|
|
397
|
+
report_data = json.load(f)
|
|
398
|
+
|
|
399
|
+
report = ValidationReport.model_validate(report_data)
|
|
400
|
+
|
|
401
|
+
# Load config to get fail_on_severity setting
|
|
402
|
+
from iam_validator.core.config.config_loader import ConfigLoader
|
|
403
|
+
|
|
404
|
+
config = ConfigLoader.load_config(config_path)
|
|
405
|
+
fail_on_severities = config.get_setting("fail_on_severity", ["error", "critical"])
|
|
406
|
+
|
|
407
|
+
# Post to PR
|
|
408
|
+
async with GitHubIntegration() as github:
|
|
409
|
+
commenter = PRCommenter(github, fail_on_severities=fail_on_severities)
|
|
410
|
+
return await commenter.post_findings_to_pr(
|
|
411
|
+
report,
|
|
412
|
+
create_review=create_review,
|
|
413
|
+
add_summary_comment=add_summary,
|
|
414
|
+
)
|
|
415
|
+
|
|
416
|
+
except FileNotFoundError:
|
|
417
|
+
logger.error(f"Report file not found: {report_file}")
|
|
418
|
+
return False
|
|
419
|
+
except json.JSONDecodeError as e:
|
|
420
|
+
logger.error(f"Invalid JSON in report file: {e}")
|
|
421
|
+
return False
|
|
422
|
+
except Exception as e:
|
|
423
|
+
logger.error(f"Failed to post report to PR: {e}")
|
|
424
|
+
return False
|