@demig0d2/skills 1.0.2 → 1.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +58 -67
- package/bin/cli.js +7 -3
- package/package.json +2 -5
- package/skills/book-creator/SKILL.md +848 -0
- package/skills/book-creator/references/kdp_specs.md +139 -0
- package/skills/book-creator/references/log_schema.md +149 -0
- package/skills/book-creator/references/patterns_quick_ref.md +71 -0
- package/skills/book-creator/references/thinkers_reference.md +104 -0
- package/skills/book-creator/scripts/__pycache__/bank_formatter.cpython-312.pyc +0 -0
- package/skills/book-creator/scripts/__pycache__/conflict_check.cpython-312.pyc +0 -0
- package/skills/book-creator/scripts/__pycache__/dna_scan.cpython-312.pyc +0 -0
- package/skills/book-creator/scripts/__pycache__/kdp_check.cpython-312.pyc +0 -0
- package/skills/book-creator/scripts/__pycache__/log_manager.cpython-312.pyc +0 -0
- package/skills/book-creator/scripts/__pycache__/scan_ai_patterns.cpython-312.pyc +0 -0
- package/skills/book-creator/scripts/__pycache__/score_report.cpython-312.pyc +0 -0
- package/skills/book-creator/scripts/__pycache__/toc_extract.cpython-312.pyc +0 -0
- package/skills/book-creator/scripts/__pycache__/validate_concept.cpython-312.pyc +0 -0
- package/skills/book-creator/scripts/__pycache__/word_count.cpython-312.pyc +0 -0
- package/skills/book-creator/scripts/bank_formatter.py +206 -0
- package/skills/book-creator/scripts/conflict_check.py +179 -0
- package/skills/book-creator/scripts/dna_scan.py +168 -0
- package/skills/book-creator/scripts/kdp_check.py +255 -0
- package/skills/book-creator/scripts/log_manager.py +258 -0
- package/skills/book-creator/scripts/scan_ai_patterns.py +279 -0
- package/skills/book-creator/scripts/score_report.py +237 -0
- package/skills/book-creator/scripts/toc_extract.py +151 -0
- package/skills/book-creator/scripts/validate_concept.py +255 -0
- package/skills/book-creator/scripts/word_count.py +196 -0
- package/skills/book-writer/scripts/__pycache__/kdp_check.cpython-312.pyc +0 -0
- package/skills/book-writer/scripts/__pycache__/toc_extract.cpython-312.pyc +0 -0
- package/skills/book-writer/scripts/__pycache__/word_count.cpython-312.pyc +0 -0
- package/skills/book-writer.zip +0 -0
- package/skills/chapter-auditor/scripts/__pycache__/score_report.cpython-312.pyc +0 -0
- package/skills/concept-expander/scripts/__pycache__/validate_concept.cpython-312.pyc +0 -0
- package/skills/continuity-tracker/scripts/__pycache__/conflict_check.cpython-312.pyc +0 -0
- package/skills/continuity-tracker/scripts/__pycache__/log_manager.cpython-312.pyc +0 -0
- package/skills/humanizer/scripts/__pycache__/dna_scan.cpython-312.pyc +0 -0
- package/skills/humanizer/scripts/__pycache__/scan_ai_patterns.cpython-312.pyc +0 -0
- package/skills/overhaul/scripts/__pycache__/changelog_gen.cpython-312.pyc +0 -0
- package/skills/overhaul/scripts/__pycache__/skill_parser.cpython-312.pyc +0 -0
- package/skills/overhaul/scripts/__pycache__/version_bump.cpython-312.pyc +0 -0
- package/skills/overhaul.zip +0 -0
- package/skills/research-aggregator/scripts/__pycache__/bank_formatter.cpython-312.pyc +0 -0
|
@@ -0,0 +1,255 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
validate_concept.py — Validate a concept document for completeness
|
|
4
|
+
|
|
5
|
+
Checks that a concept document has all required fields before
|
|
6
|
+
it enters the book-writer pipeline. Scores completeness and
|
|
7
|
+
flags missing or thin sections.
|
|
8
|
+
|
|
9
|
+
Usage:
|
|
10
|
+
python validate_concept.py <concept.md>
|
|
11
|
+
python validate_concept.py <concept.md> --json
|
|
12
|
+
python validate_concept.py <concept.md> --strict
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
import sys
|
|
16
|
+
import re
|
|
17
|
+
import json
|
|
18
|
+
import argparse
|
|
19
|
+
from pathlib import Path
|
|
20
|
+
|
|
21
|
+
# ─── Required Fields ──────────────────────────────────────────────────────────
|
|
22
|
+
|
|
23
|
+
# Each entry describes one field a concept document must (or should) contain.
# Schema per entry:
#   key        — machine-readable identifier carried into the result dicts
#   patterns   — regexes tried in order (case-insensitively) to locate the heading
#   min_words  — minimum body length before the field is flagged as "too thin"
#   max_words  — optional upper bound (only the logline uses it)
#   weight     — "required" (2 points) or "recommended" (1 point) toward completeness
#   label      — human-readable name shown in reports
REQUIRED_FIELDS = [
    {
        "key": "working_title",
        "patterns": [r"WORKING TITLE:", r"Title:"],
        "min_words": 2,
        "weight": "required",
        "label": "Working title",
    },
    {
        "key": "logline",
        "patterns": [r"LOGLINE", r"One[- ]sentence", r"Premise:"],
        "min_words": 10,
        "max_words": 30,
        "weight": "required",
        "label": "Logline (≤25 words)",
    },
    {
        "key": "core_question",
        "patterns": [r"CORE QUESTION", r"THE CORE QUESTION", r"Central question"],
        "min_words": 8,
        "weight": "required",
        "label": "Core question",
    },
    {
        "key": "reader",
        "patterns": [r"THE READER", r"Reader:", r"Audience:"],
        "min_words": 15,
        "weight": "required",
        "label": "Reader profile",
    },
    {
        "key": "promise",
        "patterns": [r"THE PROMISE", r"Promise:", r"What they gain"],
        "min_words": 10,
        "weight": "required",
        "label": "The promise",
    },
    {
        "key": "unique_angle",
        "patterns": [r"UNIQUE ANGLE", r"THE UNIQUE ANGLE", r"What makes this different"],
        "min_words": 15,
        "weight": "required",
        "label": "Unique angle",
    },
    {
        "key": "central_tension",
        "patterns": [r"CENTRAL TENSION", r"PARADOX", r"The tension"],
        "min_words": 8,
        "weight": "required",
        "label": "Central tension / paradox",
    },
    {
        "key": "emotional_arc",
        "patterns": [r"EMOTIONAL ARC", r"Arc:", r"Start:", r"Journey:"],
        "min_words": 20,
        "weight": "required",
        "label": "Emotional arc",
    },
    {
        "key": "thematic_clusters",
        "patterns": [r"THEMATIC CLUSTERS", r"Themes:", r"Chapter territories"],
        "min_words": 30,
        "weight": "required",
        "label": "Thematic clusters (7–10)",
    },
    {
        "key": "comparable_books",
        "patterns": [r"COMPARABLE BOOKS", r"Comp titles", r"Similar books"],
        "min_words": 10,
        "weight": "recommended",
        "label": "Comparable books",
    },
    {
        "key": "risks",
        "patterns": [r"RISKS", r"BLIND SPOTS", r"Risks:"],
        "min_words": 10,
        "weight": "recommended",
        "label": "Risks / blind spots",
    },
    {
        "key": "genre",
        "patterns": [r"GENRE:", r"Genre:"],
        "min_words": 1,
        "weight": "required",
        "label": "Genre",
    },
]

# Banned logline words — validate_logline() flags a logline containing any of
# these as a substring (so derived forms like "discovered" are caught too).
BANNED_LOGLINE_WORDS = ["journey", "transformation", "discover", "explore", "delve", "embark"]
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
def find_field(text: str, field: dict) -> tuple[bool, str, int]:
    """Locate one concept field in *text*.

    Tries each heading pattern in order (case-insensitively); on the first
    hit, extracts the body that follows the heading up to the next
    ALL-CAPS heading line, capped at 400 characters.

    Returns:
        (found, extracted_text, word_count)
    """
    for heading_re in field["patterns"]:
        hit = re.search(heading_re, text, re.IGNORECASE)
        if hit is None:
            continue
        # Look at a bounded window after the heading.
        tail = text[hit.end():hit.end() + 600]
        # Cut at the next heading-like line (3+ capitals then space/colon).
        next_heading = re.search(r"\n[A-Z]{3,}[\s:]", tail)
        cut = next_heading.start() if next_heading else 400
        body = tail[:cut].strip()
        return True, body, len(body.split())
    return False, "", 0
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
def validate_logline(text: str) -> list:
    """Run quality checks on the logline; return a list of issue strings.

    Checks the line following a LOGLINE heading for length (≤25 words)
    and for banned marketing words (substring match, case-insensitive).
    Returns an empty list when no logline can be located.
    """
    m = re.search(r"LOGLINE[^\n]*\n(.{10,200})", text, re.IGNORECASE)
    if not m:
        return []
    logline = m.group(1).strip()
    lowered = logline.lower()
    problems = []
    n_words = len(logline.split())
    if n_words > 25:
        problems.append(f"Logline is {n_words} words — should be ≤25")
    problems.extend(
        f"Logline contains banned word: '{word}'"
        for word in BANNED_LOGLINE_WORDS
        if word in lowered
    )
    return problems
|
|
144
|
+
|
|
145
|
+
|
|
146
|
+
def validate_concept(filepath: str, strict: bool = False) -> dict:
    """Validate a concept document and score its completeness.

    Each field in REQUIRED_FIELDS is located via find_field() and checked
    against its word-count bounds; required fields are worth 2 points,
    recommended fields 1 point.

    Args:
        filepath: Path to the concept markdown file.
        strict: When True, readiness additionally requires that the
            recommended fields pass and that the logline has no quality
            issues.  (Previously the flag was accepted but silently
            ignored; default behavior is unchanged.)

    Returns:
        A report dict with per-field results, a completeness percentage,
        and a "ready" verdict.

    Exits the process with status 1 if the file does not exist.
    """
    path = Path(filepath)
    if not path.exists():
        print(f"Error: File not found: {filepath}", file=sys.stderr)
        sys.exit(1)

    text = path.read_text(encoding="utf-8")
    total_words = len(text.split())

    results = []
    score = 0
    max_score = 0

    for field in REQUIRED_FIELDS:
        found, extracted, word_count = find_field(text, field)
        is_required = field["weight"] == "required"
        field_score = 2 if is_required else 1
        max_score += field_score

        issues = []
        if not found:
            issues.append("Field not found")  # was a placeholder-less f-string
        else:
            min_words = field.get("min_words", 0)
            max_words = field.get("max_words", 9999)
            if word_count < min_words:
                issues.append(f"Too thin ({word_count} words, min {min_words})")
            if word_count > max_words:
                issues.append(f"Too long ({word_count} words, max {max_words})")

        if not issues:
            score += field_score

        results.append({
            "field": field["label"],
            "key": field["key"],
            "weight": field["weight"],
            "found": found,
            "word_count": word_count,
            "issues": issues,
            "pass": len(issues) == 0,
        })

    # Logline quality check (length / banned words)
    logline_issues = validate_logline(text)

    completeness = round((score / max_score) * 100) if max_score else 0
    required_passing = all(r["pass"] for r in results if r["weight"] == "required")

    ready = required_passing and completeness >= 80
    if strict:
        # Strict mode: recommended fields and the logline must also be clean.
        ready = ready and all(r["pass"] for r in results) and not logline_issues

    return {
        "file": str(path),
        "total_words": total_words,
        "score": score,
        "max_score": max_score,
        "completeness_pct": completeness,
        "ready": ready,
        "fields": results,
        "logline_issues": logline_issues,
    }
|
|
205
|
+
|
|
206
|
+
|
|
207
|
+
def print_report(result: dict):
    """Render a human-readable validation report (from validate_concept) on stdout."""
    bar = "═" * 60
    icon = "✓" if result["ready"] else "✗"
    verdict = "READY for book-writer" if result["ready"] else "NOT READY — fix required fields"

    print(f"\n{bar}")
    print(" CONCEPT DOCUMENT VALIDATION")
    print(f" File: {result['file']}")
    print(bar)
    print(f" Completeness: {result['completeness_pct']}% ({result['score']}/{result['max_score']} points)")
    print(f" Status: {icon} {verdict}")

    print(f"\n {'FIELD':<35} {'STATUS':<12} {'WORDS'}")
    print(f" {'─' * 55}")

    for entry in result["fields"]:
        if entry["pass"]:
            status = "✓ ok"
        else:
            first_issue = entry["issues"][0][:20] if entry["issues"] else "?"
            status = "✗ " + first_issue
        marker = " *" if entry["weight"] == "required" else " "
        print(f" {marker}{entry['field']:<33} {status:<12} {entry['word_count']}")

    if result["logline_issues"]:
        print("\n LOGLINE ISSUES:")
        for issue in result["logline_issues"]:
            print(f" ⚠ {issue}")

    failed_required = [
        f for f in result["fields"] if not f["pass"] and f["weight"] == "required"
    ]
    if failed_required:
        print(f"\n REQUIRED FIXES ({len(failed_required)}):")
        for entry in failed_required:
            for issue in entry["issues"]:
                print(f" ✗ {entry['field']}: {issue}")

    print("\n (* = required field)")
    print(f"{bar}\n")
|
|
241
|
+
|
|
242
|
+
|
|
243
|
+
if __name__ == "__main__":
    # CLI entry point: validate one concept file, emit JSON or a report.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("concept_file")
    arg_parser.add_argument("--json", action="store_true")
    arg_parser.add_argument("--strict", action="store_true")
    opts = arg_parser.parse_args()

    report = validate_concept(opts.concept_file, opts.strict)

    if opts.json:
        print(json.dumps(report, indent=2))
    else:
        print_report(report)
|
|
@@ -0,0 +1,196 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
word_count.py — Word count analyzer for book manuscripts
|
|
4
|
+
|
|
5
|
+
Counts words per chapter/section and compares against KDP targets.
|
|
6
|
+
Detects chapter boundaries automatically from heading patterns.
|
|
7
|
+
|
|
8
|
+
Usage:
|
|
9
|
+
python word_count.py <manuscript_file>
|
|
10
|
+
python word_count.py <manuscript_file> --target <short|medium|full>
|
|
11
|
+
python word_count.py <manuscript_file> --json
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
import sys
|
|
15
|
+
import re
|
|
16
|
+
import json
|
|
17
|
+
from pathlib import Path
|
|
18
|
+
|
|
19
|
+
# ─── KDP Word Count Targets ───────────────────────────────────────────────────
|
|
20
|
+
|
|
21
|
+
# (min, max) word-count bounds per chapter and for the whole manuscript,
# keyed by book-length profile (selected via the --target CLI flag).
TARGETS = {
    "short": {"per_chapter": (1500, 2000), "total": (10000, 20000)},
    "medium": {"per_chapter": (2000, 3500), "total": (20000, 50000)},
    "full": {"per_chapter": (3500, 5000), "total": (50000, 100000)},
}

# Chapter heading patterns — tried in order against each stripped line.
# NOTE: the last pattern treats ANY markdown h1/h2 as a chapter start.
CHAPTER_PATTERNS = [
    re.compile(r"^#{1,2}\s+(Chapter\s+\d+|CHAPTER\s+\d+)", re.IGNORECASE),
    re.compile(r"^Chapter\s+\d+[:\s]", re.IGNORECASE),
    re.compile(r"^CHAPTER\s+\d+", re.IGNORECASE),
    re.compile(r"^#{1,2}\s+\w"),  # any h1/h2
]

# Section heading patterns — only recognized inside a chapter.
SECTION_PATTERNS = [
    re.compile(r"^My Story", re.IGNORECASE),
    re.compile(r"^My Reflection", re.IGNORECASE),
    re.compile(r"^#{3}\s+", re.IGNORECASE),  # h3
]
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def count_words(text: str) -> int:
    """Count word tokens in *text*.

    Uses the regex ``\\b\\w+\\b``, so contractions split ("don't" → 2 words).
    """
    return sum(1 for _ in re.finditer(r"\b\w+\b", text))
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def is_chapter_heading(line: str) -> bool:
    """Return True if the stripped line matches any chapter-heading pattern."""
    candidate = line.strip()
    for pattern in CHAPTER_PATTERNS:
        if pattern.match(candidate):
            return True
    return False
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def is_section_heading(line: str) -> bool:
    """Return True if the stripped line matches any section-heading pattern."""
    candidate = line.strip()
    for pattern in SECTION_PATTERNS:
        if pattern.match(candidate):
            return True
    return False
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def parse_manuscript(filepath: str) -> tuple[list, int]:
    """Split a manuscript into chapters/sections and count front matter.

    Chapter and section boundaries are detected line-by-line via
    is_chapter_heading() / is_section_heading().  Everything before the
    first chapter heading is treated as front matter.

    Args:
        filepath: Path to the manuscript text/markdown file.

    Returns:
        (chapters, front_matter_words) — each chapter is a dict with
        "title", "content", "sections" and "word_count"; a chapter's
        word_count includes its sections.  (The original annotation said
        ``-> list`` but the function has always returned this 2-tuple.)

    Exits the process with status 1 if the file does not exist.
    """
    path = Path(filepath)
    if not path.exists():
        print(f"Error: File not found: {filepath}", file=sys.stderr)
        sys.exit(1)

    text = path.read_text(encoding="utf-8")
    lines = text.splitlines()

    chapters = []
    current_chapter = None
    current_section = None
    front_matter_lines = []
    in_front_matter = True

    for line in lines:
        if is_chapter_heading(line):
            # Save the previous chapter (and its trailing section) first.
            if current_chapter:
                if current_section:
                    current_chapter["sections"].append(current_section)
                    current_section = None
                chapters.append(current_chapter)

            current_chapter = {
                "title": line.strip().lstrip("#").strip(),
                "content": "",
                "sections": [],
                "word_count": 0,
            }
            in_front_matter = False

        elif is_section_heading(line) and current_chapter:
            if current_section:
                current_chapter["sections"].append(current_section)
            current_section = {
                "title": line.strip().lstrip("#").strip(),
                "content": "",
                "word_count": 0,
            }

        else:
            if in_front_matter:
                front_matter_lines.append(line)
            elif current_section:
                current_section["content"] += line + "\n"
            elif current_chapter:
                current_chapter["content"] += line + "\n"

    # Flush the trailing section/chapter.
    if current_section and current_chapter:
        current_chapter["sections"].append(current_section)
    if current_chapter:
        chapters.append(current_chapter)

    # Word counts: count each section's content exactly once, then add the
    # chapter's own body (the original counted every section twice).
    for ch in chapters:
        for sec in ch["sections"]:
            sec["word_count"] = count_words(sec["content"])
        ch["word_count"] = count_words(ch["content"]) + sum(
            sec["word_count"] for sec in ch["sections"]
        )

    front_matter_words = count_words("\n".join(front_matter_lines))

    return chapters, front_matter_words
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
def assess_chapter(chapter: dict, target_range: tuple) -> str:
    """Label a chapter's word count against (lo, hi) target bounds.

    Below 80% of lo → SHORT; above 120% of hi → LONG; inside the range →
    ON TARGET; anywhere in between → CLOSE.
    """
    low, high = target_range
    words = chapter["word_count"]
    if words < low * 0.8:
        return "⚠ SHORT"
    if words > high * 1.2:
        return "⚠ LONG"
    return "✓ ON TARGET" if low <= words <= high else "~ CLOSE"
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
def print_report(chapters: list, front_matter_words: int, target_key: str = None):
    """Print a per-chapter word-count table, optionally against a KDP target."""
    total_words = front_matter_words + sum(ch["word_count"] for ch in chapters)
    target = TARGETS.get(target_key) if target_key else None
    target_range = target["per_chapter"] if target else None

    bar = "═" * 60
    print(f"\n{bar}")
    print(" MANUSCRIPT WORD COUNT REPORT")
    print(bar)
    print(f" Total manuscript: {total_words:,} words")
    print(f" Chapters detected: {len(chapters)}")
    if front_matter_words:
        print(f" Front matter: {front_matter_words:,} words")

    if target:
        tlo, thi = target["total"]
        clo, chi = target["per_chapter"]
        on_target = tlo <= total_words <= thi
        icon = "✓" if on_target else "⚠"
        print(f" Target ({target_key}): {tlo:,}–{thi:,} words total | {clo:,}–{chi:,} per chapter")
        print(f" Overall status: {icon} {'ON TARGET' if on_target else 'OFF TARGET'}")

    print(f"\n {'CHAPTER':<40} {'WORDS':>7} {'STATUS':<14} SECTIONS")
    print(f" {'─' * 56}")

    for ch in chapters:
        status = assess_chapter(ch, target_range) if target_range else ""
        if ch["sections"]:
            sections_info = ", ".join(
                f"{s['title'][:15]} ({s['word_count']:,}w)" for s in ch["sections"]
            )
        else:
            sections_info = "—"
        print(f" {ch['title'][:38]:<40} {ch['word_count']:>7,} {status:<14} {sections_info[:30]}")

    print(f"\n{bar}\n")
|
|
166
|
+
|
|
167
|
+
|
|
168
|
+
# ─── Main ─────────────────────────────────────────────────────────────────────
|
|
169
|
+
|
|
170
|
+
if __name__ == "__main__":
    # CLI entry point: parse the manuscript, emit JSON or a report.
    argv = sys.argv
    if len(argv) < 2:
        print("Usage: python word_count.py <file> [--target short|medium|full] [--json]")
        sys.exit(1)

    manuscript_path = argv[1]
    target_key = None
    as_json = False

    idx = 2
    while idx < len(argv):
        flag = argv[idx]
        if flag == "--target" and idx + 1 < len(argv):
            target_key = argv[idx + 1]
        elif flag == "--json":
            as_json = True
        idx += 1

    chapters, front_matter_count = parse_manuscript(manuscript_path)

    if as_json:
        payload = {
            "total_words": front_matter_count + sum(ch["word_count"] for ch in chapters),
            "front_matter_words": front_matter_count,
            "chapter_count": len(chapters),
            "chapters": chapters,
        }
        print(json.dumps(payload, indent=2))
    else:
        print_report(chapters, front_matter_count, target_key)
|
|
Binary file
|
|
Binary file
|
|
Binary file
|
|
Binary file
|
|
Binary file
|
|
Binary file
|
|
Binary file
|
|
Binary file
|