@demig0d2/skills 1.0.2 → 1.1.0
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +26 -45
- package/bin/cli.js +7 -3
- package/package.json +1 -1
- package/skills/book-creator/SKILL.md +848 -0
- package/skills/book-creator/references/kdp_specs.md +139 -0
- package/skills/book-creator/references/log_schema.md +149 -0
- package/skills/book-creator/references/patterns_quick_ref.md +71 -0
- package/skills/book-creator/references/thinkers_reference.md +104 -0
- package/skills/book-creator/scripts/__pycache__/bank_formatter.cpython-312.pyc +0 -0
- package/skills/book-creator/scripts/__pycache__/conflict_check.cpython-312.pyc +0 -0
- package/skills/book-creator/scripts/__pycache__/dna_scan.cpython-312.pyc +0 -0
- package/skills/book-creator/scripts/__pycache__/kdp_check.cpython-312.pyc +0 -0
- package/skills/book-creator/scripts/__pycache__/log_manager.cpython-312.pyc +0 -0
- package/skills/book-creator/scripts/__pycache__/scan_ai_patterns.cpython-312.pyc +0 -0
- package/skills/book-creator/scripts/__pycache__/score_report.cpython-312.pyc +0 -0
- package/skills/book-creator/scripts/__pycache__/toc_extract.cpython-312.pyc +0 -0
- package/skills/book-creator/scripts/__pycache__/validate_concept.cpython-312.pyc +0 -0
- package/skills/book-creator/scripts/__pycache__/word_count.cpython-312.pyc +0 -0
- package/skills/book-creator/scripts/bank_formatter.py +206 -0
- package/skills/book-creator/scripts/conflict_check.py +179 -0
- package/skills/book-creator/scripts/dna_scan.py +168 -0
- package/skills/book-creator/scripts/kdp_check.py +255 -0
- package/skills/book-creator/scripts/log_manager.py +258 -0
- package/skills/book-creator/scripts/scan_ai_patterns.py +279 -0
- package/skills/book-creator/scripts/score_report.py +237 -0
- package/skills/book-creator/scripts/toc_extract.py +151 -0
- package/skills/book-creator/scripts/validate_concept.py +255 -0
- package/skills/book-creator/scripts/word_count.py +196 -0
- package/skills/book-writer/scripts/__pycache__/kdp_check.cpython-312.pyc +0 -0
- package/skills/book-writer/scripts/__pycache__/toc_extract.cpython-312.pyc +0 -0
- package/skills/book-writer/scripts/__pycache__/word_count.cpython-312.pyc +0 -0
- package/skills/book-writer.zip +0 -0
- package/skills/chapter-auditor/scripts/__pycache__/score_report.cpython-312.pyc +0 -0
- package/skills/concept-expander/scripts/__pycache__/validate_concept.cpython-312.pyc +0 -0
- package/skills/continuity-tracker/scripts/__pycache__/conflict_check.cpython-312.pyc +0 -0
- package/skills/continuity-tracker/scripts/__pycache__/log_manager.cpython-312.pyc +0 -0
- package/skills/humanizer/scripts/__pycache__/dna_scan.cpython-312.pyc +0 -0
- package/skills/humanizer/scripts/__pycache__/scan_ai_patterns.cpython-312.pyc +0 -0
- package/skills/overhaul/scripts/__pycache__/changelog_gen.cpython-312.pyc +0 -0
- package/skills/overhaul/scripts/__pycache__/skill_parser.cpython-312.pyc +0 -0
- package/skills/overhaul/scripts/__pycache__/version_bump.cpython-312.pyc +0 -0
- package/skills/overhaul.zip +0 -0
- package/skills/research-aggregator/scripts/__pycache__/bank_formatter.cpython-312.pyc +0 -0

package/skills/book-creator/scripts/kdp_check.py
@@ -0,0 +1,255 @@
+#!/usr/bin/env python3
+"""
+kdp_check.py — KDP pre-flight checker for manuscript DOCX files
+
+Validates a DOCX manuscript against KDP's technical requirements
+before upload. Checks margins, page size, fonts, images, and structure.
+
+Usage:
+    python kdp_check.py <manuscript.docx>
+    python kdp_check.py <manuscript.docx> --trim 6x9
+    python kdp_check.py <manuscript.docx> --json
+"""
+
+import sys
+import json
+import zipfile
+import re
+from pathlib import Path
+
+# ─── KDP Requirements ─────────────────────────────────────────────────────────
+
+# Trim sizes in EMU (914400 EMU = 1 inch)
+TRIM_SIZES = {
+    "5x8": {"w": 4572000, "h": 7315200},
+    "6x9": {"w": 5486400, "h": 8229600},
+    "8.5x11": {"w": 7772400, "h": 10058400},
+}
+
+# Minimum margins in EMU (KDP requires at least 0.5" outside, 0.625" inside)
+MIN_MARGINS = {
+    "outside": 457200,  # 0.5 inch
+    "inside": 571500,  # 0.625 inch
+    "top": 457200,
+    "bottom": 457200,
+}
+
+# KDP safe fonts (always render correctly)
+SAFE_FONTS = {
+    "georgia", "times new roman", "garamond", "palatino linotype",
+    "arial", "helvetica", "calibri", "cambria", "courier new",
+    "book antiqua", "century", "trebuchet ms",
+}
+
+KDP_MIN_IMAGE_DPI = 300
+KDP_MAX_FILE_SIZE_MB = 650
+
+
+def check_file_size(filepath: str) -> dict:
+    size_mb = Path(filepath).stat().st_size / (1024 * 1024)
+    return {
+        "check": "File size",
+        "value": f"{size_mb:.1f} MB",
+        "pass": size_mb <= KDP_MAX_FILE_SIZE_MB,
+        "requirement": f"≤ {KDP_MAX_FILE_SIZE_MB} MB",
+        "note": "" if size_mb <= KDP_MAX_FILE_SIZE_MB else "File too large for KDP upload",
+    }
+
+
+def extract_docx_xml(filepath: str) -> dict:
+    """Extract relevant XML files from DOCX."""
+    xml_files = {}
+    try:
+        with zipfile.ZipFile(filepath, "r") as z:
+            for name in z.namelist():
+                if name in ("word/document.xml", "word/settings.xml",
+                            "word/styles.xml", "[Content_Types].xml"):
+                    xml_files[name] = z.read(name).decode("utf-8", errors="replace")
+    except zipfile.BadZipFile:
+        print(f"Error: Not a valid DOCX file: {filepath}", file=sys.stderr)
+        sys.exit(1)
+    return xml_files
+
+
+def check_page_size(xml_files: dict, trim_key: str = None) -> dict:
+    doc_xml = xml_files.get("word/document.xml", "")
+    # Look for w:pgSz element
+    match = re.search(r'<w:pgSz[^>]+w:w="(\d+)"[^>]+w:h="(\d+)"', doc_xml)
+    if not match:
+        match = re.search(r'<w:pgSz[^>]+w:h="(\d+)"[^>]+w:w="(\d+)"', doc_xml)
+        if match:
+            w_dxa, h_dxa = int(match.group(2)), int(match.group(1))
+        else:
+            return {
+                "check": "Page size",
+                "pass": None,
+                "value": "Not detected",
+                "requirement": "Must match chosen trim size",
+                "note": "Could not read page dimensions from document",
+            }
+    else:
+        w_dxa, h_dxa = int(match.group(1)), int(match.group(2))
+
+    # DXA to inches (1440 DXA = 1 inch)
+    w_in = w_dxa / 1440
+    h_in = h_dxa / 1440
+
+    if trim_key and trim_key in TRIM_SIZES:
+        expected = TRIM_SIZES[trim_key]
+        # Convert EMU to DXA (914400 EMU = 1 inch = 1440 DXA, so EMU/635 = DXA)
+        exp_w_dxa = expected["w"] // 635
+        exp_h_dxa = expected["h"] // 635
+        passed = abs(w_dxa - exp_w_dxa) < 100 and abs(h_dxa - exp_h_dxa) < 100
+        req = f"{trim_key} ({exp_w_dxa/1440:.2f}\" × {exp_h_dxa/1440:.2f}\")"
+    else:
+        passed = any(
+            abs(w_dxa - ts["w"] // 635) < 100
+            for ts in TRIM_SIZES.values()
+        )
+        req = "One of: 5×8, 6×9, 8.5×11"
+
+    return {
+        "check": "Page size",
+        "pass": passed,
+        "value": f"{w_in:.2f}\" × {h_in:.2f}\"",
+        "requirement": req,
+        "note": "" if passed else f"Page size doesn't match expected trim",
+    }
+
+
+def check_margins(xml_files: dict) -> dict:
+    doc_xml = xml_files.get("word/document.xml", "")
+    match = re.search(
+        r'<w:pgMar[^>]+w:top="(\d+)"[^>]+w:right="(\d+)"[^>]+w:bottom="(\d+)"[^>]+w:left="(\d+)"',
+        doc_xml,
+    )
+    if not match:
+        return {
+            "check": "Margins",
+            "pass": None,
+            "value": "Not detected",
+            "requirement": "Min 0.5\" outside, 0.625\" inside",
+            "note": "Could not read margins from document",
+        }
+
+    top, right, bottom, left = (int(match.group(i)) for i in range(1, 5))
+
+    # KDP: inside margin (left for recto) must be at least 864 DXA (0.6")
+    # outside margin must be at least 720 DXA (0.5")
+    min_outside = 720  # DXA
+    min_inside = 864  # DXA
+
+    issues = []
+    if top < min_outside:
+        issues.append(f"Top margin {top/1440:.2f}\" < 0.5\"")
+    if bottom < min_outside:
+        issues.append(f"Bottom margin {bottom/1440:.2f}\" < 0.5\"")
+    if left < min_inside:
+        issues.append(f"Left margin {left/1440:.2f}\" < 0.6\"")
+    if right < min_outside:
+        issues.append(f"Right margin {right/1440:.2f}\" < 0.5\"")
+
+    return {
+        "check": "Margins",
+        "pass": len(issues) == 0,
+        "value": f"T:{top/1440:.2f}\" R:{right/1440:.2f}\" B:{bottom/1440:.2f}\" L:{left/1440:.2f}\"",
+        "requirement": "Min 0.5\" all sides, 0.6\" inside",
+        "note": "; ".join(issues) if issues else "",
+    }
+
+
+def check_fonts(xml_files: dict) -> dict:
+    styles_xml = xml_files.get("word/styles.xml", "")
+    fonts_found = set(re.findall(r'w:name="([^"]+)"', styles_xml))
+    # Filter to font-like names (not style names)
+    font_names = {f.lower() for f in fonts_found if len(f) > 3 and " " not in f or f.lower() in SAFE_FONTS}
+    unsafe = [f for f in font_names if f not in SAFE_FONTS and len(f) > 3]
+
+    return {
+        "check": "Fonts",
+        "pass": len(unsafe) == 0,
+        "value": f"{len(font_names)} font(s) detected",
+        "requirement": "Use standard embeddable fonts",
+        "note": f"Possibly non-standard: {', '.join(list(unsafe)[:3])}" if unsafe else "",
+    }
+
+
+def check_content_types(xml_files: dict) -> dict:
+    ct_xml = xml_files.get("[Content_Types].xml", "")
+    has_rels = "relationships" in ct_xml.lower()
+    has_doc = "wordprocessingml" in ct_xml.lower()
+    return {
+        "check": "Document structure",
+        "pass": has_rels and has_doc,
+        "value": "Valid DOCX structure" if (has_rels and has_doc) else "Invalid structure",
+        "requirement": "Valid OOXML structure",
+        "note": "" if (has_rels and has_doc) else "Document may be corrupted",
+    }
+
+
+def print_report(checks: list, filepath: str):
+    passed = sum(1 for c in checks if c["pass"] is True)
+    failed = sum(1 for c in checks if c["pass"] is False)
+    unknown = sum(1 for c in checks if c["pass"] is None)
+
+    print(f"\n{'═' * 60}")
+    print(f" KDP PRE-FLIGHT CHECK")
+    print(f" File: {filepath}")
+    print(f"{'═' * 60}")
+    print(f" {'CHECK':<24} {'STATUS':<16} {'VALUE':<22} NOTE")
+    print(f" {'─' * 56}")
+
+    for c in checks:
+        if c["pass"] is True:
+            status = "✓ PASS"
+        elif c["pass"] is False:
+            status = "✗ FAIL"
+        else:
+            status = "? UNKNOWN"
+        note = c.get("note", "")[:30]
+        print(f" {c['check']:<24} {status:<16} {c['value']:<22} {note}")
+
+    print(f"\n {'─' * 56}")
+    print(f" Result: {passed} passed {failed} failed {unknown} unknown")
+    if failed == 0 and unknown == 0:
+        print(f" ✓ Ready for KDP upload.")
+    elif failed > 0:
+        print(f" ✗ Fix {failed} issue(s) before uploading.")
+    print(f"\n{'═' * 60}\n")
+
+
+# ─── Main ─────────────────────────────────────────────────────────────────────
+
+if __name__ == "__main__":
+    if len(sys.argv) < 2:
+        print("Usage: python kdp_check.py <file.docx> [--trim 5x8|6x9|8.5x11] [--json]")
+        sys.exit(1)
+
+    filepath = sys.argv[1]
+    trim_key = None
+    mode = "--report"
+
+    for i, arg in enumerate(sys.argv[2:], 2):
+        if arg == "--trim" and i + 1 < len(sys.argv):
+            trim_key = sys.argv[i + 1]
+        elif arg == "--json":
+            mode = "--json"
+
+    if not Path(filepath).exists():
+        print(f"Error: File not found: {filepath}", file=sys.stderr)
+        sys.exit(1)
+
+    xml_files = extract_docx_xml(filepath)
+
+    checks = [
+        check_file_size(filepath),
+        check_page_size(xml_files, trim_key),
+        check_margins(xml_files),
+        check_fonts(xml_files),
+        check_content_types(xml_files),
+    ]
+
+    if mode == "--json":
+        print(json.dumps({"file": filepath, "checks": checks}, indent=2))
+    else:
+        print_report(checks, filepath)

package/skills/book-creator/scripts/log_manager.py
@@ -0,0 +1,258 @@
+#!/usr/bin/env python3
+"""
+log_manager.py — Read, write, and update the continuity log
+
+The continuity log is stored as a JSON file alongside the manuscript.
+This script manages all log operations: init, update, read, and summary.
+
+Usage:
+    python log_manager.py init <book_title> [--log log.json]
+    python log_manager.py show [--log log.json]
+    python log_manager.py summary [--log log.json]
+    python log_manager.py add-chapter <N> <title> [--log log.json]
+    python log_manager.py add-fact <fact> [--log log.json]
+    python log_manager.py add-insight <chapter_n> <insight> [--log log.json]
+    python log_manager.py add-metaphor <chapter_n> <metaphor> [--log log.json]
+    python log_manager.py add-thread <thread> [--log log.json]
+    python log_manager.py close-thread <thread_index> [--log log.json]
+    python log_manager.py threads [--log log.json]
+"""
+
+import sys
+import json
+import argparse
+from pathlib import Path
+from datetime import datetime
+
+DEFAULT_LOG = "continuity_log.json"
+
+
+def load_log(log_path: str) -> dict:
+    path = Path(log_path)
+    if path.exists():
+        return json.loads(path.read_text(encoding="utf-8"))
+    return None
+
+
+def save_log(log: dict, log_path: str):
+    Path(log_path).write_text(json.dumps(log, indent=2, ensure_ascii=False), encoding="utf-8")
+
+
+def init_log(book_title: str, log_path: str) -> dict:
+    log = {
+        "book_title": book_title,
+        "created": datetime.now().isoformat(),
+        "last_updated": datetime.now().isoformat(),
+        "last_chapter": 0,
+        "commitments": [],
+        "established_facts": [],
+        "metaphors": {
+            "by_chapter": {},
+            "available": [],
+            "retired": [],
+        },
+        "insights": {},
+        "tone_decisions": {
+            "register": "",
+            "reader_address": "",
+            "author_position": "",
+            "emotional_ceiling": "",
+        },
+        "structural_patterns": {
+            "section_format": "",
+            "avg_chapter_words": 0,
+            "opening_style": "",
+            "closing_style": "",
+            "chapter_word_counts": [],
+        },
+        "open_threads": [],
+        "closed_threads": [],
+        "chapter_summaries": {},
+    }
+    save_log(log, log_path)
+    print(f" ✓ Log initialized: {log_path}")
+    print(f" Book: {book_title}")
+    return log
+
+
+def show_log(log: dict):
+    print(f"\n{'═' * 60}")
+    print(f" CONTINUITY LOG — {log['book_title']}")
+    print(f" Last updated after Chapter {log['last_chapter']}")
+    print(f"{'═' * 60}")
+
+    if log["commitments"]:
+        print(f"\n BOOK-LEVEL COMMITMENTS ({len(log['commitments'])}):")
+        for c in log["commitments"]:
+            print(f" • {c}")
+
+    if log["established_facts"]:
+        print(f"\n ESTABLISHED FACTS ({len(log['established_facts'])}):")
+        for f in log["established_facts"]:
+            print(f" • {f}")
+
+    if log["metaphors"]["by_chapter"]:
+        print(f"\n METAPHORS USED:")
+        for ch, mets in sorted(log["metaphors"]["by_chapter"].items(), key=lambda x: int(x[0])):
+            print(f" Ch.{ch}: {', '.join(mets)}")
+    if log["metaphors"]["retired"]:
+        print(f" Retired: {', '.join(log['metaphors']['retired'])}")
+
+    if log["insights"]:
+        print(f"\n INSIGHTS DELIVERED:")
+        for ch, insight in sorted(log["insights"].items(), key=lambda x: int(x[0])):
+            print(f" Ch.{ch}: {insight}")
+
+    open_threads = [t for t in log["open_threads"] if not t.get("closed")]
+    if open_threads:
+        print(f"\n OPEN THREADS ({len(open_threads)}):")
+        for i, t in enumerate(open_threads):
+            print(f" [{i}] {t['thread']}")
+            if t.get("introduced_in"):
+                print(f" Introduced: Ch.{t['introduced_in']}")
+
+    if log["chapter_summaries"]:
+        print(f"\n CHAPTER SUMMARIES ({len(log['chapter_summaries'])}):")
+        for ch, summary in sorted(log["chapter_summaries"].items(), key=lambda x: int(x[0])):
+            title = summary.get("title", f"Chapter {ch}")
+            text = summary.get("summary", "")[:80]
+            print(f" Ch.{ch} — {title}: {text}...")
+
+    print(f"\n{'═' * 60}\n")
+
+
+def summary_log(log: dict):
+    open_threads = [t for t in log["open_threads"] if not t.get("closed")]
+    print(f"\n {log['book_title']} — Continuity Summary")
+    print(f" Chapters completed: {log['last_chapter']}")
+    print(f" Facts established: {len(log['established_facts'])}")
+    print(f" Insights delivered: {len(log['insights'])}")
+    print(f" Open threads: {len(open_threads)}")
+    if open_threads:
+        print(f" ⚠ Unresolved threads:")
+        for t in open_threads:
+            print(f" · {t['thread'][:60]}")
+    print()
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("command", choices=[
+        "init", "show", "summary", "add-chapter", "add-fact",
+        "add-insight", "add-metaphor", "add-thread", "close-thread", "threads"
+    ])
+    parser.add_argument("args", nargs="*")
+    parser.add_argument("--log", default=DEFAULT_LOG)
+    parsed = parser.parse_args()
+
+    log_path = parsed.log
+    cmd = parsed.command
+    args = parsed.args
+
+    if cmd == "init":
+        title = " ".join(args) if args else "Untitled"
+        init_log(title, log_path)
+
+    elif cmd in ("show",):
+        log = load_log(log_path)
+        if not log:
+            print(f" No log found at {log_path}. Run: python log_manager.py init <title>")
+            sys.exit(1)
+        show_log(log)
+
+    elif cmd == "summary":
+        log = load_log(log_path)
+        if not log:
+            print(f" No log found at {log_path}")
+            sys.exit(1)
+        summary_log(log)
+
+    elif cmd == "add-chapter":
+        log = load_log(log_path)
+        if not log:
+            print(" No log found. Run init first.")
+            sys.exit(1)
+        n = int(args[0]) if args else log["last_chapter"] + 1
+        title = " ".join(args[1:]) if len(args) > 1 else f"Chapter {n}"
+        log["last_chapter"] = n
+        log["last_updated"] = datetime.now().isoformat()
+        if str(n) not in log["chapter_summaries"]:
+            log["chapter_summaries"][str(n)] = {"title": title, "summary": ""}
+        save_log(log, log_path)
+        print(f" ✓ Chapter {n}: {title} added to log")
+
+    elif cmd == "add-fact":
+        log = load_log(log_path)
+        if not log:
+            sys.exit(1)
+        fact = " ".join(args)
+        log["established_facts"].append(fact)
+        log["last_updated"] = datetime.now().isoformat()
+        save_log(log, log_path)
+        print(f" ✓ Fact added: {fact}")
+
+    elif cmd == "add-insight":
+        log = load_log(log_path)
+        if not log:
+            sys.exit(1)
+        ch = args[0] if args else str(log["last_chapter"])
+        insight = " ".join(args[1:]) if len(args) > 1 else ""
+        log["insights"][ch] = insight
+        log["last_updated"] = datetime.now().isoformat()
+        save_log(log, log_path)
+        print(f" ✓ Insight for Ch.{ch} added")
+
+    elif cmd == "add-metaphor":
+        log = load_log(log_path)
+        if not log:
+            sys.exit(1)
+        ch = args[0] if args else str(log["last_chapter"])
+        metaphor = " ".join(args[1:]) if len(args) > 1 else ""
+        if ch not in log["metaphors"]["by_chapter"]:
+            log["metaphors"]["by_chapter"][ch] = []
+        log["metaphors"]["by_chapter"][ch].append(metaphor)
+        log["metaphors"]["retired"].append(metaphor)
+        log["last_updated"] = datetime.now().isoformat()
+        save_log(log, log_path)
+        print(f" ✓ Metaphor added to Ch.{ch} and retired")
+
+    elif cmd == "add-thread":
+        log = load_log(log_path)
+        if not log:
+            sys.exit(1)
+        thread = " ".join(args)
+        log["open_threads"].append({
+            "thread": thread,
+            "introduced_in": str(log["last_chapter"]),
+            "closed": False,
+            "added": datetime.now().isoformat(),
+        })
+        log["last_updated"] = datetime.now().isoformat()
+        save_log(log, log_path)
+        print(f" ✓ Thread added: {thread}")
+
+    elif cmd == "close-thread":
+        log = load_log(log_path)
+        if not log:
+            sys.exit(1)
+        idx = int(args[0]) if args else -1
+        open_threads = [t for t in log["open_threads"] if not t.get("closed")]
+        if 0 <= idx < len(open_threads):
+            open_threads[idx]["closed"] = True
+            open_threads[idx]["closed_in"] = str(log["last_chapter"])
+            log["closed_threads"].append(open_threads[idx])
+            log["last_updated"] = datetime.now().isoformat()
+            save_log(log, log_path)
+            print(f" ✓ Thread [{idx}] closed: {open_threads[idx]['thread'][:60]}")
+        else:
+            print(f" ✗ Invalid thread index: {idx}")
+
+    elif cmd == "threads":
+        log = load_log(log_path)
+        if not log:
+            sys.exit(1)
+        open_threads = [t for t in log["open_threads"] if not t.get("closed")]
+        print(f"\n Open threads ({len(open_threads)}):\n")
+        for i, t in enumerate(open_threads):
+            print(f" [{i}] Introduced Ch.{t.get('introduced_in', '?')}: {t['thread']}")
+        print()
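
For orientation, a minimal usage sketch assembled from the Usage blocks above. The file name draft.docx and the book title are hypothetical placeholders, not files shipped in the package; the commands assume they are run from package/skills/book-creator/scripts/ with the default continuity_log.json in the working directory:

    python kdp_check.py draft.docx --trim 6x9
    python kdp_check.py draft.docx --json
    python log_manager.py init "Working Title"
    python log_manager.py add-chapter 1 "Opening Chapter"
    python log_manager.py add-thread "Promise made to the reader in chapter 1"
    python log_manager.py threads
    python log_manager.py close-thread 0
    python log_manager.py summary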