iam-policy-validator 1.13.1__py3-none-any.whl → 1.14.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. {iam_policy_validator-1.13.1.dist-info → iam_policy_validator-1.14.1.dist-info}/METADATA +1 -1
  2. {iam_policy_validator-1.13.1.dist-info → iam_policy_validator-1.14.1.dist-info}/RECORD +45 -39
  3. iam_validator/__version__.py +1 -1
  4. iam_validator/checks/action_condition_enforcement.py +6 -0
  5. iam_validator/checks/action_resource_matching.py +12 -12
  6. iam_validator/checks/action_validation.py +1 -0
  7. iam_validator/checks/condition_key_validation.py +2 -0
  8. iam_validator/checks/condition_type_mismatch.py +3 -0
  9. iam_validator/checks/full_wildcard.py +1 -0
  10. iam_validator/checks/mfa_condition_check.py +2 -0
  11. iam_validator/checks/policy_structure.py +9 -0
  12. iam_validator/checks/policy_type_validation.py +11 -0
  13. iam_validator/checks/principal_validation.py +5 -0
  14. iam_validator/checks/resource_validation.py +4 -0
  15. iam_validator/checks/sensitive_action.py +1 -0
  16. iam_validator/checks/service_wildcard.py +6 -3
  17. iam_validator/checks/set_operator_validation.py +3 -0
  18. iam_validator/checks/sid_uniqueness.py +2 -0
  19. iam_validator/checks/trust_policy_validation.py +3 -0
  20. iam_validator/checks/utils/__init__.py +16 -0
  21. iam_validator/checks/utils/action_parser.py +149 -0
  22. iam_validator/checks/wildcard_action.py +1 -0
  23. iam_validator/checks/wildcard_resource.py +231 -4
  24. iam_validator/commands/analyze.py +19 -1
  25. iam_validator/commands/completion.py +6 -2
  26. iam_validator/commands/validate.py +231 -12
  27. iam_validator/core/aws_service/fetcher.py +21 -9
  28. iam_validator/core/codeowners.py +245 -0
  29. iam_validator/core/config/check_documentation.py +390 -0
  30. iam_validator/core/config/config_loader.py +199 -0
  31. iam_validator/core/config/defaults.py +25 -0
  32. iam_validator/core/constants.py +1 -0
  33. iam_validator/core/diff_parser.py +8 -4
  34. iam_validator/core/finding_fingerprint.py +131 -0
  35. iam_validator/core/formatters/sarif.py +370 -128
  36. iam_validator/core/ignore_processor.py +309 -0
  37. iam_validator/core/ignored_findings.py +400 -0
  38. iam_validator/core/models.py +54 -4
  39. iam_validator/core/policy_loader.py +313 -4
  40. iam_validator/core/pr_commenter.py +223 -22
  41. iam_validator/core/report.py +22 -6
  42. iam_validator/integrations/github_integration.py +881 -123
  43. {iam_policy_validator-1.13.1.dist-info → iam_policy_validator-1.14.1.dist-info}/WHEEL +0 -0
  44. {iam_policy_validator-1.13.1.dist-info → iam_policy_validator-1.14.1.dist-info}/entry_points.txt +0 -0
  45. {iam_policy_validator-1.13.1.dist-info → iam_policy_validator-1.14.1.dist-info}/licenses/LICENSE +0 -0
iam_validator/core/pr_commenter.py
@@ -16,6 +16,7 @@ from iam_validator.core.constants import (
 from iam_validator.core.diff_parser import DiffParser
 from iam_validator.core.label_manager import LabelManager
 from iam_validator.core.models import ValidationIssue, ValidationReport
+from iam_validator.core.policy_loader import PolicyLineMap, PolicyLoader
 from iam_validator.core.report import ReportGenerator
 from iam_validator.integrations.github_integration import GitHubIntegration, ReviewEvent
 
@@ -64,13 +65,16 @@ class PRCommenter:
         cleanup_old_comments: bool = True,
         fail_on_severities: list[str] | None = None,
         severity_labels: dict[str, str | list[str]] | None = None,
+        enable_codeowners_ignore: bool = True,
+        allowed_ignore_users: list[str] | None = None,
     ):
         """Initialize PR commenter.
 
         Args:
             github: GitHubIntegration instance (will create one if None)
-            cleanup_old_comments: Whether to clean up old bot comments before posting new ones
-                (kept for backward compatibility but now handled automatically)
+            cleanup_old_comments: Whether to clean up old bot comments after posting new ones.
+                Set to False in streaming mode where files are processed one at a time
+                to avoid deleting comments from files processed earlier.
             fail_on_severities: List of severity levels that should trigger REQUEST_CHANGES
                 (e.g., ["error", "critical", "high"])
             severity_labels: Mapping of severity levels to label name(s) for automatic label management
@@ -79,13 +83,21 @@ class PRCommenter:
             - Single: {"error": "iam-validity-error", "critical": "security-critical"}
             - Multiple: {"error": ["iam-error", "needs-fix"], "critical": ["security-critical", "needs-review"]}
             - Mixed: {"error": "iam-validity-error", "critical": ["security-critical", "needs-review"]}
+            enable_codeowners_ignore: Whether to enable CODEOWNERS-based ignore feature
+            allowed_ignore_users: Fallback users who can ignore findings when no CODEOWNERS
         """
         self.github = github
         self.cleanup_old_comments = cleanup_old_comments
         self.fail_on_severities = fail_on_severities or ["error", "critical"]
         self.severity_labels = severity_labels or {}
+        self.enable_codeowners_ignore = enable_codeowners_ignore
+        self.allowed_ignore_users = allowed_ignore_users or []
         # Track issues in modified statements that are on unchanged lines
         self._context_issues: list[ContextIssue] = []
+        # Track ignored finding IDs for the current run
+        self._ignored_finding_ids: frozenset[str] = frozenset()
+        # Cache for PolicyLineMap per file (for field-level line detection)
+        self._policy_line_maps: dict[str, PolicyLineMap] = {}
 
     async def post_findings_to_pr(
         self,
@@ -93,6 +105,7 @@
         create_review: bool = True,
         add_summary_comment: bool = True,
         manage_labels: bool = True,
+        process_ignores: bool = True,
     ) -> bool:
         """Post validation findings to a PR.
 
@@ -101,6 +114,7 @@
             create_review: Whether to create a PR review with line comments
             add_summary_comment: Whether to add a summary comment
             manage_labels: Whether to manage PR labels based on severity findings
+            process_ignores: Whether to process pending ignore commands
 
         Returns:
             True if successful, False otherwise
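
Taken together, the new constructor arguments and the process_ignores flag wire the CODEOWNERS ignore flow end to end. A minimal usage sketch, using only names that appear in this diff; the report object is assumed to come from an earlier validation run:

    from iam_validator.core.pr_commenter import PRCommenter
    from iam_validator.integrations.github_integration import GitHubIntegration

    async def comment_on_pr(report):
        async with GitHubIntegration() as github:
            commenter = PRCommenter(
                github,
                fail_on_severities=["error", "critical"],
                enable_codeowners_ignore=True,       # new in 1.14.1
                allowed_ignore_users=["some-user"],  # fallback when no CODEOWNERS
            )
            # process_ignores=True applies pending ignore commands before
            # findings are filtered and posted
            return await commenter.post_findings_to_pr(report, process_ignores=True)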
@@ -118,6 +132,14 @@
 
         success = True
 
+        # Process pending ignore commands first (if enabled)
+        if process_ignores and self.enable_codeowners_ignore:
+            await self._process_ignore_commands()
+
+        # Load ignored findings for filtering
+        if self.enable_codeowners_ignore:
+            await self._load_ignored_findings()
+
         # Note: Cleanup is now handled smartly by update_or_create_review_comments()
         # It will update existing comments, create new ones, and delete resolved ones
 
@@ -131,7 +153,11 @@
         # Post summary comment (potentially as multiple parts)
         if add_summary_comment:
             generator = ReportGenerator()
-            comment_parts = generator.generate_github_comment_parts(report)
+            # Pass ignored count to show in summary
+            ignored_count = len(self._ignored_finding_ids) if self._ignored_finding_ids else 0
+            comment_parts = generator.generate_github_comment_parts(
+                report, ignored_count=ignored_count
+            )
 
             # Post all parts using the multipart method
             if not await self.github.post_multipart_comments(
@@ -189,7 +215,20 @@
             parsed_diffs = {}
         else:
             parsed_diffs = DiffParser.parse_pr_files(pr_files)
-            logger.info(f"Parsed diffs for {len(parsed_diffs)} file(s)")
+            # Use warning level for diagnostics to ensure visibility
+            logger.warning(
+                f"[DIFF] Parsed diffs for {len(parsed_diffs)} file(s): {list(parsed_diffs.keys())}"
+            )
+
+        # Collect ALL validated files (for cleanup of resolved findings)
+        # This includes files with no issues - we need to track them so stale comments get deleted
+        validated_files: set[str] = set()
+        for result in report.results:
+            relative_path = self._make_relative_path(result.policy_file)
+            if relative_path:
+                validated_files.add(relative_path)
+
+        logger.debug(f"Tracking {len(validated_files)} validated files for comment cleanup")
 
         # Group issues by file
         inline_comments: list[dict[str, Any]] = []
@@ -207,14 +246,30 @@
                 )
                 continue
 
+            # Use warning level for path diagnostics to ensure visibility
+            logger.warning(f"[PATH] Processing: {result.policy_file} -> '{relative_path}'")
+
             # Get diff info for this file
             diff_info = parsed_diffs.get(relative_path)
             if not diff_info:
-                logger.debug(
-                    f"{relative_path} not in PR diff or no changes, skipping inline comments"
+                # Log ALL available paths to help diagnose path mismatches
+                all_paths = list(parsed_diffs.keys())
+                logger.warning(
+                    f"'{relative_path}' not found in PR diff. "
+                    f"Available paths ({len(all_paths)}): {all_paths}"
                 )
-                # Still process issues for summary
+                # Check for partial matches to help diagnose
+                for avail_path in all_paths:
+                    if relative_path.endswith(avail_path.split("/")[-1]):
+                        logger.warning(
+                            f"  Possible match by filename: '{avail_path}' "
+                            f"(basename matches '{relative_path.split('/')[-1]}')"
+                        )
+                # Still process issues for summary (excluding ignored)
                 for issue in result.issues:
+                    # Skip ignored issues
+                    if self._is_issue_ignored(issue, relative_path):
+                        continue
                     if issue.statement_index is not None:
                         line_num = self._find_issue_line(
                             issue, result.policy_file, self._get_line_mapping(result.policy_file)
@@ -232,13 +287,27 @@
                 line_mapping, diff_info.changed_lines, result.policy_file
             )
 
-            logger.debug(
-                f"{relative_path}: {len(diff_info.changed_lines)} changed lines, "
-                f"{len(modified_statements)} modified statements"
-            )
+            # Check if this file has no patch (large file or GitHub truncated the diff)
+            # In this case, we allow inline comments on any line since the file is in the PR
+            allow_all_lines = diff_info.status.endswith("_no_patch")
+            if allow_all_lines:
+                logger.warning(
+                    f"[MATCH] {relative_path}: No patch available (status={diff_info.status}), "
+                    "allowing inline comments on any line"
+                )
+            else:
+                logger.warning(
+                    f"[MATCH] {relative_path}: FOUND in diff with {len(diff_info.changed_lines)} changed lines, "
+                    f"{len(modified_statements)} modified statements, status={diff_info.status}"
+                )
 
-            # Process each issue with strict filtering
+            # Process each issue with filtering (relaxed for no_patch files)
             for issue in result.issues:
+                # Skip ignored issues
+                if self._is_issue_ignored(issue, relative_path):
+                    logger.debug(f"Skipped ignored issue in {relative_path}: {issue.issue_type}")
+                    continue
+
                 line_number = self._find_issue_line(issue, result.policy_file, line_mapping)
 
                 if not line_number:
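
The relaxation above keys off diff_info.status. A minimal sketch of just that check; the concrete status strings come from DiffParser and are assumed here for illustration:

    def should_allow_all_lines(status: str) -> bool:
        # GitHub omits the patch for very large or truncated diffs; the parser
        # marks such files with a "*_no_patch" status (exact values assumed)
        return status.endswith("_no_patch")

    assert should_allow_all_lines("modified_no_patch")
    assert not should_allow_all_lines("modified")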
@@ -253,7 +322,10 @@
                 # Try to find the best line to post the comment
                 comment_line = None
 
-                if line_number in diff_info.changed_lines:
+                if allow_all_lines:
+                    # No patch - post at the actual line
+                    comment_line = line_number
+                elif line_number in diff_info.changed_lines:
                     # Best case: line 1 is in the diff
                     comment_line = line_number
                 elif diff_info.changed_lines:
@@ -270,7 +342,7 @@
                     {
                         "path": relative_path,
                         "line": comment_line,
-                        "body": issue.to_pr_comment(),
+                        "body": issue.to_pr_comment(file_path=relative_path),
                     }
                 )
                 logger.debug(
@@ -285,18 +357,19 @@
                     logger.debug(
                         f"Policy-level issue (no diff lines): {relative_path} - {issue.issue_type}"
                     )
-                # STRICT FILTERING: Only comment if line is in the diff
-                elif line_number in diff_info.changed_lines:
-                    # Exact match - post inline comment
+                # RELAXED FILTERING for no_patch files, STRICT for others
+                elif allow_all_lines or line_number in diff_info.changed_lines:
+                    # No patch: allow all lines, or exact match with changed lines
                     inline_comments.append(
                         {
                             "path": relative_path,
                             "line": line_number,
-                            "body": issue.to_pr_comment(),
+                            "body": issue.to_pr_comment(file_path=relative_path),
                         }
                     )
                     logger.debug(
                         f"Inline comment: {relative_path}:{line_number} - {issue.issue_type}"
+                        f"{' (no_patch)' if allow_all_lines else ''}"
                     )
                 elif issue.statement_index in modified_statements:
                     # Issue in modified statement but on unchanged line - save for summary
@@ -319,14 +392,31 @@
             f"{context_issue_count} context issues for summary"
         )
 
-        # If no inline comments, skip review creation but still return success
+        # Even if no inline comments, we still need to run cleanup to delete stale comments
+        # from previous runs where findings have been resolved (unless cleanup is disabled)
         if not inline_comments:
             logger.info("No inline comments to post (after diff filtering)")
+            # Still run cleanup to delete any stale comments from resolved findings
+            # (unless skip_cleanup is set for streaming mode)
+            if validated_files and self.cleanup_old_comments:
+                logger.debug("Running cleanup for stale comments from resolved findings...")
+                await self.github.update_or_create_review_comments(
+                    comments=[],
+                    body="",
+                    event=ReviewEvent.COMMENT,
+                    identifier=self.REVIEW_IDENTIFIER,
+                    validated_files=validated_files,
+                    skip_cleanup=False,  # Explicitly run cleanup
+                )
             return True
 
         # Determine review event based on fail_on_severities config
+        # Exclude ignored findings from blocking issues
         has_blocking_issues = any(
             issue.severity in self.fail_on_severities
+            and not self._is_issue_ignored(
+                issue, self._make_relative_path(result.policy_file) or ""
+            )
             for result in report.results
             for issue in result.issues
         )
@@ -337,6 +427,9 @@
         )
 
         # Post review with smart update-or-create logic
+        # Pass validated_files to ensure stale comments are deleted even for files
+        # that no longer have any findings (issues were resolved)
+        # Use skip_cleanup based on cleanup_old_comments flag (False in streaming mode)
         review_body = f"{self.REVIEW_IDENTIFIER}"
 
         success = await self.github.update_or_create_review_comments(
@@ -344,6 +437,8 @@
             body=review_body,
             event=event,
             identifier=self.REVIEW_IDENTIFIER,
+            validated_files=validated_files,
+            skip_cleanup=not self.cleanup_old_comments,  # Skip cleanup in streaming mode
         )
 
         if success:
@@ -369,10 +464,15 @@
 
         # If already relative, use as-is
         if not os.path.isabs(policy_file):
+            logger.debug(f"Path already relative: {policy_file}")
             return policy_file
 
         # Try to get workspace path from environment
         workspace = os.getenv("GITHUB_WORKSPACE")
+        # Log first call only to avoid spam
+        if not hasattr(self, "_logged_workspace"):
+            self._logged_workspace = True
+            logger.warning(f"[ENV] GITHUB_WORKSPACE={workspace}")
         if workspace:
             try:
                 # Convert to Path objects for proper path handling
@@ -383,7 +483,12 @@
                 if abs_file_path.is_relative_to(workspace_path):
                     relative = abs_file_path.relative_to(workspace_path)
                     # Use forward slashes for GitHub (works on all platforms)
-                    return str(relative).replace("\\", "/")
+                    result = str(relative).replace("\\", "/")
+                    return result
+                else:
+                    logger.warning(
+                        f"[PATH] File not within workspace: {abs_file_path} not in {workspace_path}"
+                    )
             except (ValueError, OSError) as e:
                 logger.debug(f"Could not compute relative path for {policy_file}: {e}")
 
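The workspace-relative conversion in these two hunks is plain pathlib. A self-contained sketch of the same technique, with the real method's logging and fallback behavior simplified:

    import os
    from pathlib import Path

    def make_relative(policy_file: str) -> str:
        if not os.path.isabs(policy_file):
            return policy_file
        workspace = os.getenv("GITHUB_WORKSPACE")
        if workspace:
            abs_file_path = Path(policy_file)
            workspace_path = Path(workspace)
            if abs_file_path.is_relative_to(workspace_path):  # Python 3.9+
                # Forward slashes so the result matches GitHub's diff paths
                return str(abs_file_path.relative_to(workspace_path)).replace("\\", "/")
        return policy_file  # simplified fallback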
@@ -448,6 +553,10 @@
     ) -> int | None:
         """Find the line number for an issue.
 
+        Uses field-level line detection when available for precise comment placement.
+        For example, an issue about an invalid Action will point to the exact
+        Action line, not just the statement start.
+
         Args:
             issue: Validation issue
             policy_file: Path to policy file
@@ -460,17 +569,51 @@
         if issue.line_number:
             return issue.line_number
 
-        # Otherwise, use statement mapping
+        # Try field-level line detection first (most precise)
+        if issue.field_name and issue.statement_index >= 0:
+            policy_line_map = self._get_policy_line_map(policy_file)
+            if policy_line_map:
+                field_line = policy_line_map.get_line_for_field(
+                    issue.statement_index, issue.field_name
+                )
+                if field_line:
+                    return field_line
+
+        # Fallback: use statement mapping
         if issue.statement_index in line_mapping:
             return line_mapping[issue.statement_index]
 
-        # Fallback: try to find specific field in file
+        # Fallback: try to find specific field in file by searching
         search_term = issue.action or issue.resource or issue.condition_key
         if search_term:
             return self._search_for_field_line(policy_file, issue.statement_index, search_term)
 
         return None
 
+    def _get_policy_line_map(self, policy_file: str) -> PolicyLineMap | None:
+        """Get cached PolicyLineMap for field-level line detection.
+
+        Args:
+            policy_file: Path to policy file
+
+        Returns:
+            PolicyLineMap or None if parsing failed
+        """
+        if policy_file in self._policy_line_maps:
+            return self._policy_line_maps[policy_file]
+
+        try:
+            with open(policy_file, encoding="utf-8") as f:
+                content = f.read()
+
+            policy_map = PolicyLoader.parse_statement_field_lines(content)
+            self._policy_line_maps[policy_file] = policy_map
+            return policy_map
+
+        except Exception as e:  # pylint: disable=broad-exception-caught
+            logger.debug(f"Could not parse field lines for {policy_file}: {e}")
+            return None
+
     def _search_for_field_line(
         self, policy_file: str, statement_idx: int, search_term: str
     ) -> int | None:
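
Field-level detection is easiest to see on a concrete policy. An illustrative sketch using the two calls shown above (parse_statement_field_lines and get_line_for_field); the policy and the expected line number are invented for the example, assuming 1-based lines:

    POLICY = """{
      "Version": "2012-10-17",
      "Statement": [{
        "Effect": "Allow",
        "Action": "s3:GetObjcet",
        "Resource": "*"
      }]
    }"""

    # A finding about the misspelled action in statement 0 can now be pinned
    # to the "Action" line itself (line 5 here) instead of the statement start.
    line_map = PolicyLoader.parse_statement_field_lines(POLICY)
    print(line_map.get_line_for_field(0, "Action"))  # expected: 5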
@@ -521,6 +664,57 @@
             logger.debug(f"Could not search {policy_file}: {e}")
         return None
 
+    async def _process_ignore_commands(self) -> None:
+        """Process pending ignore commands from PR comments."""
+        if not self.github:
+            return
+
+        from iam_validator.core.ignore_processor import (  # pylint: disable=import-outside-toplevel
+            IgnoreCommandProcessor,
+        )
+
+        processor = IgnoreCommandProcessor(
+            github=self.github,
+            allowed_users=self.allowed_ignore_users,
+        )
+        ignored_count = await processor.process_pending_ignores()
+        if ignored_count > 0:
+            logger.info(f"Processed {ignored_count} ignore command(s)")
+
+    async def _load_ignored_findings(self) -> None:
+        """Load ignored findings for the current PR."""
+        if not self.github:
+            return
+
+        from iam_validator.core.ignored_findings import (  # pylint: disable=import-outside-toplevel
+            IgnoredFindingsStore,
+        )
+
+        store = IgnoredFindingsStore(self.github)
+        self._ignored_finding_ids = await store.get_ignored_ids()
+        if self._ignored_finding_ids:
+            logger.debug(f"Loaded {len(self._ignored_finding_ids)} ignored finding(s)")
+
+    def _is_issue_ignored(self, issue: ValidationIssue, file_path: str) -> bool:
+        """Check if an issue should be ignored.
+
+        Args:
+            issue: The validation issue
+            file_path: Relative path to the policy file
+
+        Returns:
+            True if the issue is ignored
+        """
+        if not self._ignored_finding_ids:
+            return False
+
+        from iam_validator.core.finding_fingerprint import (  # pylint: disable=import-outside-toplevel
+            FindingFingerprint,
+        )
+
+        fingerprint = FindingFingerprint.from_issue(issue, file_path)
+        return fingerprint.to_hash() in self._ignored_finding_ids
+
 
 async def post_report_to_pr(
     report_file: str,
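
Only the fingerprint call sites appear in this diff; the implementation lives in the new iam_validator/core/finding_fingerprint.py (+131 lines, listed above). A plausible sketch of the idea, not the package's actual implementation: hash a finding's stable coordinates so an ignore survives line-number churn.

    import hashlib
    from dataclasses import dataclass

    @dataclass(frozen=True)
    class FingerprintSketch:
        file_path: str
        issue_type: str
        statement_index: int | None  # fields chosen for illustration only

        def to_hash(self) -> str:
            # Hash stable fields, not line numbers, so unrelated edits that
            # shift lines do not invalidate an existing ignore
            raw = f"{self.file_path}|{self.issue_type}|{self.statement_index}"
            return hashlib.sha256(raw.encode()).hexdigest()[:16]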
@@ -555,12 +749,19 @@ async def post_report_to_pr(
     fail_on_severities = config.get_setting("fail_on_severity", ["error", "critical"])
     severity_labels = config.get_setting("severity_labels", {})
 
+    # Get ignore settings
+    ignore_settings = config.get_setting("ignore_settings", {})
+    enable_codeowners_ignore = ignore_settings.get("enabled", True)
+    allowed_ignore_users = ignore_settings.get("allowed_users", [])
+
     # Post to PR
     async with GitHubIntegration() as github:
         commenter = PRCommenter(
             github,
             fail_on_severities=fail_on_severities,
             severity_labels=severity_labels,
+            enable_codeowners_ignore=enable_codeowners_ignore,
+            allowed_ignore_users=allowed_ignore_users,
         )
         return await commenter.post_findings_to_pr(
             report,
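
The ignore_settings block read here implies a configuration shape like the following (structure inferred from the two .get() calls above; the user names are placeholders):

    # What config.get_setting("ignore_settings", {}) is expected to return:
    ignore_settings = {
        "enabled": True,                    # maps to enable_codeowners_ignore
        "allowed_users": ["alice", "bob"],  # fallback when no CODEOWNERS file
    }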
iam_validator/core/report.py
@@ -238,12 +238,14 @@ class ReportGenerator:
         self,
         report: ValidationReport,
         max_length_per_part: int = constants.GITHUB_COMMENT_SPLIT_LIMIT,
+        ignored_count: int = 0,
     ) -> list[str]:
         """Generate GitHub PR comment(s), splitting into multiple parts if needed.
 
         Args:
             report: Validation report
             max_length_per_part: Maximum character length per comment part (default from GITHUB_COMMENT_SPLIT_LIMIT)
+            ignored_count: Number of findings that were ignored (will be shown in summary)
 
         Returns:
             List of comment parts (each under max_length_per_part)
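
A minimal call sketch for the extended signature, assuming a ValidationReport from a prior run:

    generator = ReportGenerator()
    # ignored_count > 0 adds an "Ignored Findings" row to the summary table
    parts = generator.generate_github_comment_parts(report, ignored_count=3)
    # each part stays under GITHUB_COMMENT_SPLIT_LIMIT characters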
@@ -255,13 +257,13 @@
         if estimated_size <= max_length_per_part:
             # Try single comment
             single_comment = self.generate_github_comment(
-                report, max_length=max_length_per_part * 2
+                report, max_length=max_length_per_part * 2, ignored_count=ignored_count
             )
             if len(single_comment) <= max_length_per_part:
                 return [single_comment]
 
         # Need to split into multiple parts
-        return self._generate_split_comments(report, max_length_per_part)
+        return self._generate_split_comments(report, max_length_per_part, ignored_count)
 
     def _estimate_report_size(self, report: ValidationReport) -> int:
         """Estimate the size of the report in characters.
@@ -277,12 +279,15 @@
             report.total_issues * constants.COMMENT_CHARS_PER_ISSUE_ESTIMATE
         )
 
-    def _generate_split_comments(self, report: ValidationReport, max_length: int) -> list[str]:
+    def _generate_split_comments(
+        self, report: ValidationReport, max_length: int, ignored_count: int = 0
+    ) -> list[str]:
         """Split a large report into multiple comment parts.
 
         Args:
             report: Validation report
             max_length: Maximum length per part
+            ignored_count: Number of ignored findings to show in summary
 
         Returns:
             List of comment parts
@@ -290,7 +295,7 @@
         parts: list[str] = []
 
         # Generate header (will be in first part only)
-        header_lines = self._generate_header(report)
+        header_lines = self._generate_header(report, ignored_count)
         header_content = "\n".join(header_lines)
 
         # Generate footer (will be in all parts)
@@ -383,8 +388,13 @@
 
         return parts
 
-    def _generate_header(self, report: ValidationReport) -> list[str]:
-        """Generate the comment header with summary."""
+    def _generate_header(self, report: ValidationReport, ignored_count: int = 0) -> list[str]:
+        """Generate the comment header with summary.
+
+        Args:
+            report: Validation report
+            ignored_count: Number of findings that were ignored
+        """
         lines = []
 
         # Title with emoji and status badge
@@ -414,6 +424,8 @@
         lines.append(
             f"| **Total Issues Found** | {report.total_issues} | {'⚠️' if report.total_issues > 0 else '✨'} |"
         )
+        if ignored_count > 0:
+            lines.append(f"| **Ignored Findings** | {ignored_count} | 🔕 |")
         lines.append("")
 
         # Issue breakdown
@@ -527,12 +539,14 @@
         self,
         report: ValidationReport,
         max_length: int = constants.GITHUB_MAX_COMMENT_LENGTH,
+        ignored_count: int = 0,
     ) -> str:
         """Generate a GitHub-flavored markdown comment for PR reviews.
 
         Args:
             report: Validation report
             max_length: Maximum character length (default from GITHUB_MAX_COMMENT_LENGTH constant)
+            ignored_count: Number of findings that were ignored (will be shown in summary)
 
         Returns:
             Markdown formatted string
@@ -567,6 +581,8 @@
         lines.append(
             f"| **Total Issues Found** | {report.total_issues} | {'⚠️' if report.total_issues > 0 else '✨'} |"
         )
+        if ignored_count > 0:
+            lines.append(f"| **Ignored Findings** | {ignored_count} | 🔕 |")
         lines.append("")
 
         # Issue breakdown
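
With illustrative counts, the two appended rows render as:

    | **Total Issues Found** | 4 | ⚠️ |
    | **Ignored Findings** | 3 | 🔕 |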