reporails-cli 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58) hide show
  1. reporails_cli/.env.example +1 -0
  2. reporails_cli/__init__.py +24 -0
  3. reporails_cli/bundled/.semgrepignore +51 -0
  4. reporails_cli/bundled/__init__.py +31 -0
  5. reporails_cli/bundled/capability-patterns.yml +54 -0
  6. reporails_cli/bundled/levels.yml +99 -0
  7. reporails_cli/core/__init__.py +35 -0
  8. reporails_cli/core/agents.py +147 -0
  9. reporails_cli/core/applicability.py +150 -0
  10. reporails_cli/core/bootstrap.py +147 -0
  11. reporails_cli/core/cache.py +352 -0
  12. reporails_cli/core/capability.py +245 -0
  13. reporails_cli/core/discover.py +362 -0
  14. reporails_cli/core/engine.py +177 -0
  15. reporails_cli/core/init.py +309 -0
  16. reporails_cli/core/levels.py +177 -0
  17. reporails_cli/core/models.py +329 -0
  18. reporails_cli/core/opengrep/__init__.py +34 -0
  19. reporails_cli/core/opengrep/runner.py +203 -0
  20. reporails_cli/core/opengrep/semgrepignore.py +39 -0
  21. reporails_cli/core/opengrep/templates.py +138 -0
  22. reporails_cli/core/registry.py +155 -0
  23. reporails_cli/core/sarif.py +181 -0
  24. reporails_cli/core/scorer.py +178 -0
  25. reporails_cli/core/semantic.py +193 -0
  26. reporails_cli/core/utils.py +139 -0
  27. reporails_cli/formatters/__init__.py +19 -0
  28. reporails_cli/formatters/json.py +137 -0
  29. reporails_cli/formatters/mcp.py +68 -0
  30. reporails_cli/formatters/text/__init__.py +32 -0
  31. reporails_cli/formatters/text/box.py +89 -0
  32. reporails_cli/formatters/text/chars.py +42 -0
  33. reporails_cli/formatters/text/compact.py +119 -0
  34. reporails_cli/formatters/text/components.py +117 -0
  35. reporails_cli/formatters/text/full.py +135 -0
  36. reporails_cli/formatters/text/rules.py +50 -0
  37. reporails_cli/formatters/text/violations.py +92 -0
  38. reporails_cli/interfaces/__init__.py +1 -0
  39. reporails_cli/interfaces/cli/__init__.py +7 -0
  40. reporails_cli/interfaces/cli/main.py +352 -0
  41. reporails_cli/interfaces/mcp/__init__.py +5 -0
  42. reporails_cli/interfaces/mcp/server.py +194 -0
  43. reporails_cli/interfaces/mcp/tools.py +136 -0
  44. reporails_cli/py.typed +0 -0
  45. reporails_cli/templates/__init__.py +65 -0
  46. reporails_cli/templates/cli_box.txt +10 -0
  47. reporails_cli/templates/cli_cta.txt +4 -0
  48. reporails_cli/templates/cli_delta.txt +1 -0
  49. reporails_cli/templates/cli_file_header.txt +1 -0
  50. reporails_cli/templates/cli_legend.txt +1 -0
  51. reporails_cli/templates/cli_pending.txt +3 -0
  52. reporails_cli/templates/cli_violation.txt +1 -0
  53. reporails_cli/templates/cli_working.txt +2 -0
  54. reporails_cli-0.0.1.dist-info/METADATA +108 -0
  55. reporails_cli-0.0.1.dist-info/RECORD +58 -0
  56. reporails_cli-0.0.1.dist-info/WHEEL +4 -0
  57. reporails_cli-0.0.1.dist-info/entry_points.txt +3 -0
  58. reporails_cli-0.0.1.dist-info/licenses/LICENSE +201 -0
@@ -0,0 +1,329 @@
1
+ """Data models for reporails. All models are frozen (immutable) where possible."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from dataclasses import dataclass, field
6
+ from enum import Enum
7
+ from pathlib import Path
8
+ from typing import TYPE_CHECKING
9
+
10
+ if TYPE_CHECKING:
11
+ from reporails_cli.core.cache import AnalyticsEntry
12
+
13
+
14
class Category(str, Enum):
    """Rule categories matching framework.

    Inherits from str so members compare and serialize as plain strings.
    """

    STRUCTURE = "structure"
    CONTENT = "content"
    MAINTENANCE = "maintenance"
    GOVERNANCE = "governance"
    EFFICIENCY = "efficiency"
22
+
23
+
24
class RuleType(str, Enum):
    """How the rule is detected. Two types only.

    str-valued so the type name serializes directly in output formats.
    """

    DETERMINISTIC = "deterministic"  # OpenGrep pattern → direct violation
    SEMANTIC = "semantic"  # LLM judgment required
29
+
30
+
31
class Severity(str, Enum):
    """Violation severity levels.

    Weight values in the trailing comments are applied by the scorer.
    """

    CRITICAL = "critical"  # Weight: 5.5
    HIGH = "high"  # Weight: 4.0
    MEDIUM = "medium"  # Weight: 2.5
    LOW = "low"  # Weight: 1.0
38
+
39
+
40
class Level(str, Enum):
    """Capability levels from framework.

    Ordered L1 (lowest) through L6; display names in trailing comments.
    """

    L1 = "L1"  # Absent
    L2 = "L2"  # Basic
    L3 = "L3"  # Structured
    L4 = "L4"  # Abstracted
    L5 = "L5"  # Governed
    L6 = "L6"  # Adaptive
49
+
50
+
51
@dataclass(frozen=True)
class Check:
    """A specific check within a rule. Maps to OpenGrep pattern.

    Check ids extend their parent rule id (rule "S1" -> check
    "S1-root-too-long").
    """

    id: str  # e.g., "S1-root-too-long"
    name: str  # e.g., "Root file exceeds 200 lines"
    severity: Severity
58
+
59
+
60
@dataclass
class Rule:
    """A rule definition loaded from framework frontmatter.

    Deliberately mutable (not frozen): md_path/yml_path are assigned
    after loading. Deterministic rules populate `checks`; semantic rules
    populate the question/criteria/choices group instead.
    """

    # Required (from frontmatter)
    id: str  # e.g., "S1"
    title: str  # e.g., "Size Limits"
    category: Category
    type: RuleType
    level: str  # e.g., "L2" - minimum level this rule applies to

    # Checks (deterministic rules)
    checks: list[Check] = field(default_factory=list)

    # Semantic fields (semantic rules)
    question: str | None = None
    criteria: list[dict[str, str]] | str | None = None  # [{key, check}, ...] or string
    choices: list[dict[str, str]] | list[str] | None = None  # [{value, label}, ...]
    pass_value: str | None = None
    examples: dict[str, list[str]] | None = None  # {good: [...], bad: [...]}

    # References
    sources: list[int] = field(default_factory=list)
    see_also: list[str] = field(default_factory=list)

    # Legacy field names (for backward compatibility during transition)
    detection: str | None = None
    scoring: int = 0
    validation: str | None = None

    # Paths (set after loading)
    md_path: Path | None = None
    yml_path: Path | None = None
93
+
94
+
95
@dataclass(frozen=True)
class Violation:
    """A rule violation found during analysis.

    Frozen so violations can be safely shared and stored in result tuples.
    """

    rule_id: str  # e.g., "S1"
    rule_title: str  # e.g., "Size Limits"
    location: str  # e.g., "CLAUDE.md:45"
    message: str  # From OpenGrep
    severity: Severity
    check_id: str | None = None  # e.g., "S1-root-too-long"
105
+
106
+
107
@dataclass(frozen=True)
class JudgmentRequest:
    """Request for host LLM to evaluate semantic rule.

    Carries everything the evaluator needs: the content to judge, the
    question, named criteria, example good/bad snippets, the allowed
    choices, and which choice counts as a pass.
    """

    rule_id: str
    rule_title: str
    content: str  # Text to evaluate
    location: str  # e.g., "CLAUDE.md"
    question: str  # What to evaluate
    criteria: dict[str, str]  # {key: check, ...}
    examples: dict[str, list[str]]  # {good: [...], bad: [...]}
    choices: list[str]  # [value, ...]
    pass_value: str  # Which choice means "pass"
    severity: Severity
    points_if_fail: int  # score penalty applied when the verdict fails
122
+
123
+
124
@dataclass(frozen=True)
class JudgmentResponse:
    """Response from host LLM after evaluation.

    `passed` is the precomputed result of comparing verdict against the
    request's pass_value.
    """

    rule_id: str
    verdict: str  # One of the choice values
    reason: str  # Explanation
    passed: bool  # verdict == pass_value
132
+
133
+
134
+ # =============================================================================
135
+ # Feature Detection Models
136
+ # =============================================================================
137
+
138
+
139
@dataclass
class DetectedFeatures:
    """Features detected in a project for capability scoring.

    Populated in two phases (mutable so phase 2 can fill in its flags):
    - Phase 1: Filesystem detection (applicability.py)
    - Phase 2: Content detection (capability.py via OpenGrep)
    """

    # === Phase 1: Filesystem detection ===

    # Base existence
    has_instruction_file: bool = False  # Any instruction file found
    has_claude_md: bool = False  # CLAUDE.md at root (legacy compat)

    # Directory structure
    has_rules_dir: bool = False  # .claude/rules/, .cursor/rules/, etc.
    has_shared_files: bool = False  # .shared/, shared/, cross-refs
    has_backbone: bool = False  # .reporails/backbone.yml

    # Discovery
    component_count: int = 0  # Components from discovery
    instruction_file_count: int = 0
    has_multiple_instruction_files: bool = False
    has_hierarchical_structure: bool = False  # nested CLAUDE.md files
    detected_agents: list[str] = field(default_factory=list)

    # === Phase 2: Content detection (OpenGrep) ===

    # Content analysis
    has_sections: bool = False  # Has H2+ headers
    has_imports: bool = False  # @imports or file references
    has_explicit_constraints: bool = False  # MUST/NEVER keywords
    has_path_scoped_rules: bool = False  # Rules with paths: frontmatter
173
+
174
+
175
@dataclass(frozen=True)
class ContentFeatures:
    """Intermediate result from OpenGrep content analysis.

    Mirrors the four phase-2 flags on DetectedFeatures.
    """

    has_sections: bool = False
    has_imports: bool = False
    has_explicit_constraints: bool = False
    has_path_scoped_rules: bool = False
183
+
184
+
185
@dataclass(frozen=True)
class CapabilityResult:
    """Result of capability detection pipeline."""

    features: DetectedFeatures  # merged phase-1 + phase-2 detections
    capability_score: int  # 0-12
    level: Level  # Base level (L1-L6)
    has_orphan_features: bool  # Has features above base level (display as L3+)
    feature_summary: str  # Human-readable
194
+
195
+
196
@dataclass(frozen=True)
class FrictionEstimate:
    """Friction estimate from violations."""

    # Ordinal label, highest to lowest friction.
    level: str  # "extreme", "high", "medium", "small", "none"
201
+
202
+
203
+ # =============================================================================
204
+ # Configuration Models
205
+ # =============================================================================
206
+
207
+
208
@dataclass
class GlobalConfig:
    """Global user configuration (~/.reporails/config.yml)."""

    framework_path: Path | None = None  # Local override (dev)
    auto_update_check: bool = True  # check for framework updates automatically
214
+
215
+
216
@dataclass
class ProjectConfig:
    """Project-level configuration (.reporails/config.yml)."""

    framework_version: str | None = None  # Pin version
    disabled_rules: list[str] = field(default_factory=list)  # rule ids to skip
    # Per-rule setting overrides, keyed by rule id.
    overrides: dict[str, dict[str, str]] = field(default_factory=dict)
223
+
224
+
225
+ # =============================================================================
226
+ # Result Models
227
+ # =============================================================================
228
+
229
+
230
@dataclass(frozen=True)
class ScanDelta:
    """Comparison between current and previous scan.

    Every field uses None to mean "unchanged or no previous scan", so
    formatters can skip deltas that carry no information.
    """

    score_delta: float | None  # None if no previous or unchanged
    level_previous: str | None  # None if unchanged or no previous
    level_improved: bool | None  # True=up, False=down, None=unchanged/no previous
    violations_delta: int | None  # Negative=improvement, positive=regression, None if unchanged

    @staticmethod
    def _level_number(level: str) -> int:
        """Extract the numeric part of a level string ("L3" -> 3).

        Returns 0 for anything unparseable (bare "L", non-digit suffix,
        non-"L" prefix) so malformed levels compare as "below L1"
        instead of raising.
        """
        if level.startswith("L"):
            try:
                return int(level[1:])
            except ValueError:
                return 0
        return 0

    @classmethod
    def compute(
        cls,
        current_score: float,
        current_level: str,
        current_violations: int,
        previous: AnalyticsEntry | None,
    ) -> ScanDelta:
        """Compute delta from current values and previous scan entry.

        Args:
            current_score: Current scan score
            current_level: Current level (e.g., "L3")
            current_violations: Current violation count
            previous: Previous AnalyticsEntry or None

        Returns:
            ScanDelta with computed differences
        """
        if previous is None:
            return cls(None, None, None, None)

        # Score delta (round to 1 decimal, None if unchanged)
        raw_score_delta = round(current_score - previous.score, 1)
        score_delta = raw_score_delta if raw_score_delta != 0 else None

        # Level comparison. Parsing the full numeric suffix (not just the
        # second character) avoids IndexError on a bare "L" and stays
        # correct for hypothetical levels >= L10.
        curr_num = cls._level_number(current_level)
        prev_num = cls._level_number(previous.level)
        if curr_num != prev_num:
            level_previous = previous.level
            level_improved = curr_num > prev_num
        else:
            level_previous = None
            level_improved = None

        # Violations delta (None if unchanged)
        viol_delta = current_violations - previous.violations_count
        violations_delta = viol_delta if viol_delta != 0 else None

        return cls(score_delta, level_previous, level_improved, violations_delta)
280
+
281
+
282
@dataclass(frozen=True)
class PendingSemantic:
    """Summary of pending semantic rules for partial evaluation."""

    rule_count: int  # Number of semantic rules pending
    file_count: int  # Files with pending semantic checks
    rules: tuple[str, ...]  # Rule IDs (e.g., "C6", "C10")
289
+
290
+
291
@dataclass(frozen=True)
class ValidationResult:
    """Complete validation output.

    `is_partial` defaults to True: pattern-only runs leave semantic rules
    pending (summarized in `pending_semantic` and `judgment_requests`).
    """

    score: float  # 0.0-10.0 scale
    level: Level  # Capability level
    violations: tuple[Violation, ...]  # Immutable
    judgment_requests: tuple[JudgmentRequest, ...]  # semantic rules awaiting LLM judgment
    rules_checked: int  # Deterministic rules checked
    rules_passed: int
    rules_failed: int
    feature_summary: str  # Human-readable
    friction: FrictionEstimate
    # Evaluation completeness
    is_partial: bool = True  # True for CLI (pattern-only), False for MCP (includes semantic)
    pending_semantic: PendingSemantic | None = None  # Summary of pending semantic rules
307
+
308
+
309
@dataclass(frozen=True)
class InitResult:
    """Result of initialization.

    Paths/version are None when the corresponding step failed; details
    accumulate in `errors`.
    """

    success: bool
    opengrep_path: Path | None
    rules_path: Path | None
    framework_version: str | None
    errors: list[str] = field(default_factory=list)
318
+
319
+
320
@dataclass(frozen=True)
class UpdateResult:
    """Result of framework update."""

    success: bool
    message: str  # human-readable outcome summary
    old_version: str | None = None
    new_version: str | None = None
    rules_path: Path | None = None
    rules_count: int = 0  # rules available after the update
@@ -0,0 +1,34 @@
1
"""OpenGrep integration module.

Public API for running OpenGrep and processing results.

Re-exports the runner and template helpers as this package's public
surface; `__all__` below is the authoritative list.
"""

from reporails_cli.core.opengrep.runner import (
    get_rule_yml_paths,
    run_capability_detection,
    run_opengrep,
    run_rule_validation,
    set_debug_timing,
)
from reporails_cli.core.opengrep.templates import (
    TEMPLATE_PATTERN,
    has_templates,
    resolve_templates,
)

# Backward-compatible alias
resolve_yml_templates = resolve_templates

__all__ = [
    # Runner functions
    "run_opengrep",
    "run_capability_detection",
    "run_rule_validation",
    "get_rule_yml_paths",
    "set_debug_timing",
    # Template functions
    "has_templates",
    "resolve_templates",
    "resolve_yml_templates",  # Backward-compatible alias
    "TEMPLATE_PATTERN",
]
@@ -0,0 +1,203 @@
1
+ """OpenGrep binary execution.
2
+
3
+ Runs OpenGrep and returns parsed SARIF output.
4
+ """
5
+
6
+ from __future__ import annotations
7
+
8
+ import contextlib
9
+ import json
10
+ import logging
11
+ import subprocess
12
+ import sys
13
+ import time
14
+ from pathlib import Path
15
+ from tempfile import NamedTemporaryFile, TemporaryDirectory
16
+ from typing import Any
17
+
18
+ from reporails_cli.bundled import get_capability_patterns_path
19
+ from reporails_cli.core.bootstrap import get_opengrep_bin
20
+ from reporails_cli.core.models import Rule
21
+ from reporails_cli.core.opengrep.semgrepignore import ensure_semgrepignore
22
+ from reporails_cli.core.opengrep.templates import has_templates, resolve_templates
23
+
24
logger = logging.getLogger(__name__)

# Module-level debug flag (set by CLI)
# Toggled via set_debug_timing(); read by _log_timing() before printing.
_debug_timing = False
28
+
29
+
30
def set_debug_timing(enabled: bool) -> None:
    """Enable/disable timing output to stderr.

    Called by the CLI layer; flips the module-level _debug_timing flag
    that _log_timing() checks.
    """
    global _debug_timing
    _debug_timing = enabled
34
+
35
+
36
def _log_timing(label: str, elapsed_ms: float) -> None:
    """Emit a "[perf] <label>: <ms>ms" line to stderr when timing debug is on."""
    if not _debug_timing:
        return
    print(f"[perf] {label}: {elapsed_ms:.0f}ms", file=sys.stderr)
40
+
41
+
42
def run_opengrep(
    yml_paths: list[Path],
    target: Path,
    opengrep_path: Path | None = None,
    template_context: dict[str, str | list[str]] | None = None,
) -> dict[str, Any]:
    """Execute OpenGrep with specified rule configs.

    Shells out to OpenGrep, returns parsed SARIF. Any failure mode
    (missing binary, non-0/1 exit, unparseable output) degrades to an
    empty SARIF document ({"runs": []}) rather than raising.

    Args:
        yml_paths: List of .yml rule config files (must exist)
        target: Directory to scan
        opengrep_path: Path to OpenGrep binary (optional, auto-detects)
        template_context: Optional dict for resolving {{placeholder}} in yml files

    Returns:
        Parsed SARIF JSON output
    """
    start_time = time.perf_counter()

    # Filter to only existing yml files - don't call OpenGrep with nothing to run
    valid_paths = [p for p in yml_paths if p and p.exists()]
    if not valid_paths:
        return {"runs": []}

    if opengrep_path is None:
        opengrep_path = get_opengrep_bin()

    if not opengrep_path.exists():
        logger.warning("OpenGrep binary not found: %s", opengrep_path)
        return {"runs": []}

    # Create temp file for SARIF output. delete=False so the file survives
    # closing the handle here; OpenGrep writes it, and the finally block
    # removes it.
    with NamedTemporaryFile(mode="w", suffix=".json", delete=False) as f:
        sarif_path = Path(f.name)

    # Use temp directory for resolved yml files if context provided
    temp_dir = TemporaryDirectory() if template_context else None

    # Ensure .semgrepignore exists for performance; non-None return means
    # we created it and must delete it in the finally block.
    created_semgrepignore = ensure_semgrepignore(target)

    try:
        # Resolve templates if context provided
        if template_context and temp_dir:
            resolved_paths: list[Path] = []
            for yml_path in valid_paths:
                if has_templates(yml_path):
                    # Resolve and write to temp file.
                    # NOTE(review): temp files are keyed by basename only —
                    # two configs with the same name in different dirs would
                    # overwrite each other here; confirm this cannot occur.
                    resolved_content = resolve_templates(yml_path, template_context)
                    temp_yml = Path(temp_dir.name) / yml_path.name
                    temp_yml.write_text(resolved_content, encoding="utf-8")
                    resolved_paths.append(temp_yml)
                else:
                    # No templates, use original
                    resolved_paths.append(yml_path)
            config_paths = resolved_paths
        else:
            config_paths = valid_paths

        # Build command
        cmd = [
            str(opengrep_path),
            "scan",
            "--sarif",
            f"--output={sarif_path}",
        ]

        # Add rule files
        for yml_path in config_paths:
            cmd.extend(["--config", str(yml_path)])

        # Add target
        cmd.append(str(target))

        # Run OpenGrep (check=False: non-zero exit is handled below, not raised)
        proc = subprocess.run(cmd, capture_output=True, check=False)

        elapsed_ms = (time.perf_counter() - start_time) * 1000
        _log_timing(f"opengrep ({len(config_paths)} rules)", elapsed_ms)

        # Check for errors (0 = no findings, 1 = findings found)
        if proc.returncode not in (0, 1):
            logger.warning(
                "OpenGrep failed with code %d: %s",
                proc.returncode,
                proc.stderr.decode("utf-8", errors="replace")[:500],
            )
            return {"runs": []}

        # Parse SARIF output
        if sarif_path.exists():
            try:
                result: dict[str, Any] = json.loads(sarif_path.read_text(encoding="utf-8"))
                return result
            except json.JSONDecodeError as e:
                logger.warning("Invalid SARIF output from OpenGrep: %s", e)
                return {"runs": []}
        return {"runs": []}

    finally:
        # Cleanup runs on every exit path: SARIF temp file, resolved-yml
        # temp dir, and any .semgrepignore we created in the target.
        if sarif_path.exists():
            sarif_path.unlink()
        if temp_dir:
            temp_dir.cleanup()
        # Clean up created .semgrepignore
        if created_semgrepignore and created_semgrepignore.exists():
            with contextlib.suppress(OSError):
                created_semgrepignore.unlink()
152
+
153
+
154
def run_capability_detection(target: Path) -> dict[str, Any]:
    """Run capability detection using bundled patterns.

    Uses bundled capability-patterns.yml for content analysis.

    Args:
        target: Directory to scan

    Returns:
        Parsed SARIF JSON output ({"runs": []} when patterns are missing)
    """
    patterns = get_capability_patterns_path()
    if patterns.exists():
        return run_opengrep([patterns], target)
    logger.warning("Capability patterns not found: %s", patterns)
    return {"runs": []}
171
+
172
+
173
def run_rule_validation(rules: dict[str, Rule], target: Path) -> dict[str, Any]:
    """Run rule validation using rule .yml files.

    Args:
        rules: Dict of rules with yml_path set
        target: Directory to scan

    Returns:
        Parsed SARIF JSON output
    """
    configs = get_rule_yml_paths(rules)
    # An empty list simply means no deterministic patterns apply - normal, not an error.
    return run_opengrep(configs, target) if configs else {"runs": []}
189
+
190
+
191
def get_rule_yml_paths(rules: dict[str, Rule]) -> list[Path]:
    """Get list of .yml paths for rules that have them and exist.

    Args:
        rules: Dict of rules

    Returns:
        List of paths to existing .yml files
    """
    paths: list[Path] = []
    for rule in rules.values():
        candidate = rule.yml_path
        if candidate is not None and candidate.exists():
            paths.append(candidate)
    return paths
@@ -0,0 +1,39 @@
1
+ """Semgrepignore file handling.
2
+
3
+ Manages .semgrepignore files for OpenGrep performance.
4
+ """
5
+
6
+ from __future__ import annotations
7
+
8
+ import shutil
9
+ from pathlib import Path
10
+
11
+ from reporails_cli.bundled import get_semgrepignore_path
12
+
13
+
14
def ensure_semgrepignore(target: Path) -> Path | None:
    """Ensure .semgrepignore exists in target directory.

    Copies the bundled default when the directory has none.

    Args:
        target: Target directory to scan

    Returns:
        Path to the newly created .semgrepignore (so the caller can remove
        it later), or None when one already existed, the bundled default is
        missing, or the copy failed.
    """
    base = target.parent if not target.is_dir() else target
    dest = base / ".semgrepignore"
    if dest.exists():
        return None

    # Copy the bundled default into the scan directory.
    source = get_semgrepignore_path()
    if not source.exists():
        return None
    try:
        shutil.copy(source, dest)
    except OSError:
        return None
    return dest  # caller is responsible for cleanup
+ return None