@demig0d2/skills 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29) hide show
  1. package/LICENSE +21 -0
  2. package/README.md +111 -0
  3. package/bin/cli.js +313 -0
  4. package/package.json +44 -0
  5. package/skills/book-writer/SKILL.md +1396 -0
  6. package/skills/book-writer/references/kdp_specs.md +139 -0
  7. package/skills/book-writer/scripts/kdp_check.py +255 -0
  8. package/skills/book-writer/scripts/toc_extract.py +151 -0
  9. package/skills/book-writer/scripts/word_count.py +196 -0
  10. package/skills/chapter-auditor/SKILL.md +231 -0
  11. package/skills/chapter-auditor/scripts/score_report.py +237 -0
  12. package/skills/concept-expander/SKILL.md +170 -0
  13. package/skills/concept-expander/scripts/validate_concept.py +255 -0
  14. package/skills/continuity-tracker/SKILL.md +251 -0
  15. package/skills/continuity-tracker/references/log_schema.md +149 -0
  16. package/skills/continuity-tracker/scripts/conflict_check.py +179 -0
  17. package/skills/continuity-tracker/scripts/log_manager.py +258 -0
  18. package/skills/humanizer/SKILL.md +632 -0
  19. package/skills/humanizer/references/patterns_quick_ref.md +71 -0
  20. package/skills/humanizer/scripts/dna_scan.py +168 -0
  21. package/skills/humanizer/scripts/scan_ai_patterns.py +279 -0
  22. package/skills/overhaul/SKILL.md +697 -0
  23. package/skills/overhaul/references/upgrade_checklist.md +81 -0
  24. package/skills/overhaul/scripts/changelog_gen.py +183 -0
  25. package/skills/overhaul/scripts/skill_parser.py +265 -0
  26. package/skills/overhaul/scripts/version_bump.py +128 -0
  27. package/skills/research-aggregator/SKILL.md +194 -0
  28. package/skills/research-aggregator/references/thinkers_reference.md +104 -0
  29. package/skills/research-aggregator/scripts/bank_formatter.py +206 -0
@@ -0,0 +1,71 @@
1
+ # AI Pattern Quick Reference
2
+
3
+ All 25 documented AI writing patterns from Wikipedia's "Signs of AI Writing" guide.
4
+ Used by `scan_ai_patterns.py` and the humanizer skill.
5
+
6
+ ---
7
+
8
+ ## CONTENT PATTERNS (1–6)
9
+
10
+ | # | Pattern | Key Trigger Words |
11
+ |---|---------|------------------|
12
+ | 1 | Significance Inflation | stands/serves as, testament to, pivotal moment, underscores, evolving landscape |
13
+ | 2 | Notability Claims | national media outlets, active social media presence, leading expert |
14
+ | 3 | Superficial -ing Endings | highlighting..., symbolizing..., contributing to..., showcasing... |
15
+ | 4 | Promotional Language | vibrant, nestled, breathtaking, groundbreaking, renowned, stunning |
16
+ | 5 | Vague Attributions | experts argue, observers say, many believe, several sources |
17
+ | 6 | Challenges/Future Sections | Despite challenges... continues to thrive, Future Outlook |
18
+
19
+ ## LANGUAGE & GRAMMAR (7–12)
20
+
21
+ | # | Pattern | Key Trigger Words |
22
+ |---|---------|------------------|
23
+ | 7 | AI Vocabulary | delve, tapestry, multifaceted, nuanced, embark, realm, foster, transformative |
24
+ | 8 | Copula Avoidance | serves as, stands as, represents a, boasts, features, offers a |
25
+ | 9 | Negative Parallelism | "It's not just X; it's Y" / "Not merely X, but Y" |
26
+ | 10 | Rule of Three | Exactly 3 abstract parallel items used as rhetorical device |
27
+ | 11 | Synonym Cycling | Excessive substitution: protagonist → main character → central figure |
28
+ | 12 | False Ranges | "from X to Y, from A to B" where items aren't on a real scale |
29
+
30
+ ## STYLE PATTERNS (13–18)
31
+
32
+ | # | Pattern | Key Trigger Words |
33
+ |---|---------|------------------|
34
+ | 13 | Em Dash Overuse | — used as default connector more than 2× per page |
35
+ | 14 | Boldface Overuse | **Bold** applied mechanically in prose |
36
+ | 15 | Inline-Header Lists | **Header:** description as bullet items |
37
+ | 16 | Title Case in Headings | Every Main Word Capitalized In Heading |
38
+ | 17 | Emojis | 🚀 💡 ✅ in headings or bullets |
39
+ | 18 | Curly Quotes | "smart" curly quotes in manuscripts |
40
+
41
+ ## COMMUNICATION PATTERNS (19–21)
42
+
43
+ | # | Pattern | Key Trigger Words |
44
+ |---|---------|------------------|
45
+ | 19 | Chatbot Artifacts | I hope this helps, Let me know, Of course!, Feel free to |
46
+ | 20 | Knowledge-Cutoff Disclaimers | "as of [date]", "while specific details are limited" |
47
+ | 21 | Sycophantic Tone | Great question!, You're absolutely right!, That's excellent |
48
+
49
+ ## FILLER & HEDGING (22–25)
50
+
51
+ | # | Pattern | Key Trigger Words |
52
+ |---|---------|------------------|
53
+ | 22 | Filler Phrases | In order to, Due to the fact that, At this point in time |
54
+ | 23 | Hedging Overload | could potentially possibly be argued that might |
55
+ | 24 | Generic Positive Conclusions | The future looks bright, Exciting times ahead |
56
+ | 25 | Hyphen Overuse | cross-functional, data-driven, decision-making, well-known |
57
+
58
+ ---
59
+
60
+ ## PROTECTED — NEVER STRIP (Vivid's DNA)
61
+
62
+ These constructions superficially match patterns above but are protected:
63
+
64
+ | Construction | Why Protected | Strip Instead |
65
+ |---|---|---|
66
+ | "Not the silence, but what it forced me to face" | Grounded in specific experience | "Not just X; it's Y" (abstract) |
67
+ | Parallel phrases building to emotional peak | Earned accumulation | Generic 3-item lists |
68
+ | Em dash around emotionally significant phrase | Rhythmic weight | Em dash as default connector |
69
+ | "But here's..." / "The hardest part wasn't..." | Signature phrases | Generic pivots |
70
+ | insidious, agonizing, desolate, suffocating | Name felt experience | vibrant, groundbreaking, stunning |
71
+ | Physical imagery: "pressed down on my chest" | Specific + grounded | Recycled: journey, storm, burden |
@@ -0,0 +1,168 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ dna_scan.py — Scan text for Vivid's protected DNA constructions
4
+
5
+ Identifies Vivid's authentic signature constructions that must NOT be
6
+ stripped by the humanizer. Marks them so the pattern elimination pass
7
+ knows to skip them.
8
+
9
+ Usage:
10
+ python dna_scan.py <input_file>
11
+ python dna_scan.py <input_file> --json
12
+ """
13
+
14
+ import sys
15
+ import re
16
+ import json
17
+ from pathlib import Path
18
+
19
# ─── DNA Constructions to Protect ────────────────────────────────────────────

# Each rule carries a display name, a description, a list of regex patterns,
# and an optional reviewer note. scan_dna() applies every pattern with
# re.IGNORECASE, one finding per line per rule.
DNA_RULES = [
    {
        "name": "Signature Phrases",
        "description": "Vivid's recurring voice markers — never strip",
        "patterns": [
            r"\bbut here'?s\b",
            r"\bthe hardest part wasn'?t\b",
            r"\bthat is what .{3,40} did to me from the inside\b",
            r"\band you start to see\b",
        ],
    },
    {
        "name": "Protected Negative Parallelisms",
        "description": "Grounded in specific experience — not generic contrast",
        "patterns": [
            r"\bnot the .{5,60}, but what\b",
            r"\bnot the .{5,60}, but the\b",
        ],
        "note": "Only protect if both sides reference concrete, specific experience",
    },
    {
        "name": "Protected Em Dashes (Emotional Weight)",
        "description": "Em dashes around emotionally significant phrases",
        "patterns": [
            r"— that .{3,50} —",
            r"— [a-z].{3,50} —",
        ],
        "note": "Only protect when em dash is around a named emotional moment",
    },
    {
        "name": "Physical Imagery",
        "description": "Vivid's grounded physical metaphors — never flatten",
        "patterns": [
            r"\bpressed down on my chest\b",
            r"\bknife that cut\b",
            r"\bscraps of attention\b",
            r"\binvisible chains\b",
            r"\bdeafening scream inside\b",
            r"\bsuffocating grip\b",
            r"\bunraveling\b",
        ],
    },
    {
        "name": "Protected Elevated Vocabulary",
        "description": "Elevated words used to name felt experience — not decoration",
        "patterns": [
            r"\binsidious\b", r"\bimperceptibly\b", r"\bagonizing\b",
            r"\bdesolate\b", r"\bconspicuously\b", r"\bgrotesque\b",
            r"\bincessant\b", r"\bsuffocating\b",
        ],
        "note": "Protect only when modifying a specific felt experience",
    },
    {
        "name": "Reframe Closings",
        "description": "Single-sentence reframe closings — core Vivid structure",
        "patterns": [
            # Standalone short paragraph (reframe closing). The scanner
            # searches with re.IGNORECASE, which would let [A-Z] match any
            # letter; the scoped (?-i:...) flag keeps the capital-first-letter
            # requirement case-sensitive as intended.
            r"^(?-i:[A-Z]).{20,120}\.$",
        ],
        "note": "Flag for review — Claude determines if it's a reframe closing",
    },
]
82
+
83
# ─── Scanner ──────────────────────────────────────────────────────────────────

def scan_dna(filepath: str) -> dict:
    """Scan *filepath* for Vivid's protected DNA constructions.

    Returns a dict with the file name, total line count, per-rule findings
    (line number, truncated text, reviewer note), and a running total of
    protected lines. Exits the process with status 1 if the file is missing.
    """
    source = Path(filepath)
    if not source.exists():
        print(f"Error: File not found: {filepath}", file=sys.stderr)
        sys.exit(1)

    all_lines = source.read_text(encoding="utf-8").splitlines()

    report = {
        "file": str(source),
        "total_lines": len(all_lines),
        "protected_constructions": [],
        "total_protected": 0,
    }

    for rule in DNA_RULES:
        hits = []
        for idx, content in enumerate(all_lines, start=1):
            # any() short-circuits on the first matching pattern, so each
            # line yields at most one finding per rule.
            if any(re.search(p, content, re.IGNORECASE) for p in rule["patterns"]):
                hits.append({
                    "line": idx,
                    "text": content.strip()[:120],
                    "note": rule.get("note", ""),
                })

        if hits:
            report["protected_constructions"].append({
                "rule": rule["name"],
                "description": rule["description"],
                "count": len(hits),
                "instances": hits,
            })
            report["total_protected"] += len(hits)

    return report
123
+
124
+
125
def print_dna_report(results: dict):
    """Render scan_dna() results as a human-readable report on stdout."""
    heavy = "═" * 60
    light = "─" * 56

    print(f"\n{heavy}")
    print(f" DNA PROTECTION SCAN")
    print(f" File: {results['file']}")
    print(f"{heavy}")

    # Nothing protected is a valid outcome, not an error.
    if results["total_protected"] == 0:
        print("\n No protected DNA constructions found.\n")
        print(" This is normal for non-Vivid text or early drafts.\n")
        return

    print(f"\n Protected constructions found: {results['total_protected']}")
    print(f" These will be EXEMPT from AI pattern elimination.\n")

    for item in results["protected_constructions"]:
        plural = "s" if item["count"] > 1 else ""
        print(f" {light}")
        print(f" ✓ PROTECT: {item['rule']} ({item['count']} instance{plural})")
        print(f" {item['description']}")
        # Show at most three sample instances per rule.
        for inst in item["instances"][:3]:
            print(f" Line {inst['line']:>4}: {inst['text'][:90]}")
            if inst.get("note"):
                print(f" Note: {inst['note']}")

    print(f"\n{heavy}")
    print(f" Mark these {results['total_protected']} line(s) as protected")
    print(f" before running scan_ai_patterns.py.\n")
151
+
152
+
153
# ─── Main ─────────────────────────────────────────────────────────────────────

if __name__ == "__main__":
    # CLI entry point: dna_scan.py <input_file> [--json]
    if len(sys.argv) < 2:
        print("Usage: python dna_scan.py <input_file> [--json]")
        sys.exit(1)

    input_path = sys.argv[1]
    output_mode = sys.argv[2] if len(sys.argv) > 2 else "--report"

    scan_results = scan_dna(input_path)

    # Any mode other than --json falls back to the human-readable report.
    if output_mode == "--json":
        print(json.dumps(scan_results, indent=2))
    else:
        print_dna_report(scan_results)
@@ -0,0 +1,279 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ scan_ai_patterns.py — Scan text for AI writing patterns
4
+
5
+ Usage:
6
+ python scan_ai_patterns.py <input_file>
7
+ python scan_ai_patterns.py <input_file> --json
8
+ python scan_ai_patterns.py <input_file> --summary
9
+
10
+ Output:
11
+ Human-readable report of all AI patterns found, with line numbers.
12
+ Use --json for machine-readable output (for audit scripts).
13
+ Use --summary for a count-only view.
14
+ """
15
+
16
+ import sys
17
+ import re
18
+ import json
19
+ from pathlib import Path
20
+
21
# ─── Pattern Definitions ─────────────────────────────────────────────────────

# Each entry: a human-readable label and a list of lowercase regexes that
# scan_file() runs against each lowercased line. All alternations use
# non-capturing groups (?:...) so re.findall() reports the FULL matched
# phrase (e.g. "experts say"), not just the captured fragment ("say").
PATTERNS = {
    "significance_inflation": {
        "label": "Pattern 1 — Significance Inflation",
        "terms": [
            r"\bstands as\b", r"\bserves as\b", r"\bmarks a\b", r"\btestament to\b",
            r"\ba reminder that\b", r"\bpivotal moment\b", r"\bcrucial role\b",
            r"\bvital role\b", r"\bunderscores\b", r"\bhighlights its importance\b",
            r"\breflects broader\b", r"\bsetting the stage for\b", r"\bevolving landscape\b",
            r"\bindelible mark\b", r"\bdeeply rooted\b", r"\bkey turning point\b",
            r"\bfocal point\b", r"\bmarks a shift\b", r"\bshaping the\b",
        ],
    },
    "promotional_language": {
        "label": "Pattern 4 — Promotional Language",
        "terms": [
            r"\bboasts\b", r"\bvibrant\b", r"\bnestled\b", r"\bbreathtaking\b",
            r"\bgroundbreaking\b", r"\brenowned\b", r"\bstunning\b", r"\bmust-visit\b",
            r"\bmust-read\b", r"\bshowcasing\b", r"\bexemplifies\b",
        ],
    },
    "ing_tack_ons": {
        "label": "Pattern 3 — Superficial -ing Endings",
        "terms": [
            r"\bhighlighting\b", r"\bunderscoring\b", r"\bemphasizing\b",
            r"\bensuring\b", r"\breflecting\b", r"\bsymbolizing\b",
            r"\bcontributing to\b", r"\bcultivating\b", r"\bfostering\b",
            r"\bshowcasing\b", r"\bencompassing\b",
        ],
    },
    "ai_vocabulary": {
        "label": "Pattern 7 — AI Vocabulary Words",
        "terms": [
            r"\bdelve\b", r"\btapestry\b", r"\bmultifaceted\b", r"\bnuanced\b",
            r"\bembark\b", r"\brealm\b", r"\bfoster\b", r"\belevate\b",
            r"\bleverage\b", r"\bnavigate\b", r"\bunpack\b", r"\bholistic\b",
            r"\bsynergy\b", r"\btransformative\b", r"\bimpactful\b", r"\brobust\b",
            r"\bpivotal\b", r"\btestament\b", r"\blandscape\b", r"\bgarner\b",
            r"\bintricate\b", r"\binterplay\b",
        ],
    },
    "copula_avoidance": {
        "label": "Pattern 8 — Copula Avoidance",
        "terms": [
            r"\bserves as\b", r"\bstands as\b", r"\brepresents a\b", r"\bboasts\b",
            r"\bfeatures\b", r"\boffers a\b",
        ],
    },
    "vague_attributions": {
        "label": "Pattern 5 — Vague Attributions",
        "terms": [
            r"\bexperts (?:say|argue|believe|suggest)\b",
            r"\bindustry (?:reports|observers)\b",
            r"\bmany believe\b", r"\bsome critics argue\b",
            r"\bit is widely believed\b", r"\bobservers (?:have|say)\b",
        ],
    },
    "filler_phrases": {
        "label": "Pattern 22 — Filler Phrases",
        "terms": [
            r"\bin order to\b", r"\bdue to the fact that\b", r"\bat this point in time\b",
            r"\bin the event that\b", r"\bhas the ability to\b",
            r"\bit is important to note\b", r"\bat its core\b",
            r"\bin today's world\b", r"\bin conclusion\b",
            r"\bto summarize\b", r"\bit goes without saying\b",
            r"\bneedless to say\b",
        ],
    },
    "hedging_overload": {
        "label": "Pattern 23 — Hedging Overload",
        "terms": [
            r"\bcould potentially\b", r"\bmight possibly\b",
            r"\bit could be argued\b", r"\bpotentially possibly\b",
        ],
    },
    "generic_conclusions": {
        "label": "Pattern 24 — Generic Positive Conclusions",
        "terms": [
            r"\bthe future (?:looks|is) bright\b", r"\bexciting times (?:lie )?ahead\b",
            r"\bthis is just the beginning\b", r"\bcontinue this journey\b",
            r"\bthe possibilities are endless\b", r"\btoward excellence\b",
        ],
    },
    "chatbot_artifacts": {
        "label": "Pattern 19 — Chatbot Artifacts",
        "terms": [
            r"\bgreat question\b", r"\bi hope this helps\b",
            r"\blet me know if\b", r"\bfeel free to\b",
            # No trailing \b after "!" — a word boundary after punctuation
            # only exists when a word character follows, so "Of course! "
            # at end of clause would never have matched.
            r"\bof course!",
            r"\bcertainly!",
            r"\byou'?re absolutely right\b",
        ],
    },
    "em_dash_overuse": {
        "label": "Pattern 13 — Em Dash Overuse",
        "terms": [r"—"],
        "count_threshold": 3,  # scan_file flags a line only if it holds >= 3 em dashes
    },
    "negative_parallelism": {
        "label": "Pattern 9 — Negative Parallelism",
        "terms": [
            r"\bit'?s not just (?:about )?\b.*?it'?s\b",
            r"\bnot (?:only|merely|just)\b.*?\bbut\b",
        ],
    },
    "false_ranges": {
        "label": "Pattern 12 — False Ranges",
        "terms": [
            r"\bfrom .{5,40} to .{5,40}, from\b",
        ],
    },
    "hyphen_overuse": {
        "label": "Pattern 25 — Hyphenated Word Pair Overuse",
        "terms": [
            r"\bcross-functional\b", r"\bdata-driven\b", r"\bclient-facing\b",
            r"\bdecision-making\b", r"\bwell-known\b", r"\bhigh-quality\b",
            r"\breal-time\b", r"\blong-term\b", r"\bend-to-end\b",
            r"\bthird-party\b",
        ],
    },
}
142
+
143
# ─── DNA Protection Terms (never flag these) ─────────────────────────────────

# Lowercase phrases; a line containing any of them is exempt from flagging.
DNA_PROTECTED_PHRASES = [
    "but here's",
    "the hardest part wasn't",
    "that is what",
    "did to me from the inside",
    "and you start to see",
]

# ─── Scanner ──────────────────────────────────────────────────────────────────

def is_dna_protected(line: str) -> bool:
    """Return True if *line* contains one of Vivid's protected signature phrases."""
    lowered = line.lower()
    for phrase in DNA_PROTECTED_PHRASES:
        if phrase in lowered:
            return True
    return False
158
+
159
+
160
def scan_file(filepath: str) -> dict:
    """Scan *filepath* line-by-line against every entry in PATTERNS.

    Lines containing a DNA-protected phrase (see is_dna_protected) are
    skipped entirely. Returns a results dict with per-pattern findings
    (line number, truncated text, matched phrases), per-pattern counts,
    a total flag count, and a ``clean`` boolean. Exits the process with
    status 1 if the file does not exist.
    """
    path = Path(filepath)
    if not path.exists():
        print(f"Error: File not found: {filepath}", file=sys.stderr)
        sys.exit(1)

    text = path.read_text(encoding="utf-8")
    lines = text.splitlines()

    results = {
        "file": str(path),
        "total_lines": len(lines),
        "total_words": len(text.split()),
        "findings": [],
        "pattern_counts": {},
        "total_flags": 0,
        "clean": True,
    }

    for pattern_key, pattern_data in PATTERNS.items():
        label = pattern_data["label"]
        terms = pattern_data["terms"]
        threshold = pattern_data.get("count_threshold", 1)
        pattern_findings = []

        for line_num, line in enumerate(lines, start=1):
            if is_dna_protected(line):
                continue

            line_lower = line.lower()
            matched_terms = []

            for term in terms:
                # finditer + group(0) reports the FULL matched text. The
                # previous re.findall() returned only the captured group
                # for alternation patterns like r"\bexperts (say|argue)\b"
                # ("say" instead of "experts say"), and could even return
                # empty strings for optional groups.
                matched_terms.extend(
                    m.group(0) for m in re.finditer(term, line_lower)
                )

            if matched_terms:
                # Em dashes are only a problem in bulk: flag a line only
                # when it holds at least `threshold` of them.
                if pattern_key == "em_dash_overuse":
                    if line.count("—") < threshold:
                        continue

                pattern_findings.append({
                    "line": line_num,
                    "text": line.strip()[:120],
                    "matched": list(set(matched_terms)),
                })

        if pattern_findings:
            results["findings"].append({
                "pattern": label,
                "key": pattern_key,
                "count": len(pattern_findings),
                "instances": pattern_findings,
            })
            results["pattern_counts"][pattern_key] = len(pattern_findings)
            results["total_flags"] += len(pattern_findings)
            results["clean"] = False

    return results
222
+
223
+
224
def print_report(results: dict):
    """Render scan_file() results as a full human-readable report on stdout."""
    heavy = "═" * 60
    light = "─" * 56

    print(f"\n{heavy}")
    print(f" AI PATTERN SCAN REPORT")
    print(f" File: {results['file']}")
    print(f" Words: {results['total_words']:,} | Lines: {results['total_lines']:,}")
    print(f"{heavy}")

    if results["clean"]:
        print("\n ✓ No AI patterns detected. Text is clean.\n")
        return

    print(f"\n Total flags: {results['total_flags']}")
    print(f" Patterns triggered: {len(results['findings'])}\n")

    for finding in results["findings"]:
        plural = "s" if finding["count"] > 1 else ""
        print(f" {light}")
        print(f" {finding['pattern']} ({finding['count']} instance{plural})")
        # Show at most five instances per pattern to keep the report short.
        for instance in finding["instances"][:5]:
            print(f" Line {instance['line']:>4}: {instance['text'][:90]}")
            print(f" Matched: {', '.join(instance['matched'][:3])}")
        if len(finding["instances"]) > 5:
            print(f" ... and {len(finding['instances']) - 5} more")

    print(f"\n{heavy}")
    print(f" Run humanizer pass to fix {results['total_flags']} flag(s).\n")
+
250
+
251
def print_summary(results: dict):
    """Print a count-only, one-screen summary of scan_file() results."""
    print(f"\n {results['file']} — {results['total_words']:,} words")
    if results["clean"]:
        print(" ✓ Clean — no AI patterns found\n")
        return

    print(f" ✗ {results['total_flags']} flags across {len(results['findings'])} patterns\n")
    for finding in results["findings"]:
        # Right-align counts to three columns so patterns line up.
        print(f" {finding['count']:>3}x {finding['pattern']}")
    print()
260
+
261
+
262
# ─── Main ─────────────────────────────────────────────────────────────────────

if __name__ == "__main__":
    # CLI entry point: scan_ai_patterns.py <input_file> [--json|--summary]
    if len(sys.argv) < 2:
        print("Usage: python scan_ai_patterns.py <input_file> [--json|--summary]")
        sys.exit(1)

    input_path = sys.argv[1]
    output_mode = sys.argv[2] if len(sys.argv) > 2 else "--report"

    scan_results = scan_file(input_path)

    # Unknown modes fall back to the full human-readable report.
    if output_mode == "--json":
        print(json.dumps(scan_results, indent=2))
    elif output_mode == "--summary":
        print_summary(scan_results)
    else:
        print_report(scan_results)