novel-writer-cli 0.3.0 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. package/README.md +1 -1
  2. package/agents/chapter-writer.md +43 -14
  3. package/agents/character-weaver.md +7 -1
  4. package/agents/plot-architect.md +20 -7
  5. package/agents/quality-judge.md +199 -20
  6. package/agents/style-analyzer.md +14 -8
  7. package/agents/style-refiner.md +10 -3
  8. package/agents/world-builder.md +8 -1
  9. package/dist/__tests__/agent-prompts-anti-ai-upgrade.test.js +194 -6
  10. package/dist/__tests__/agent-prompts-platform-expansion.test.js +33 -0
  11. package/dist/__tests__/anti-ai-infrastructure.test.js +548 -0
  12. package/dist/__tests__/anti-ai-templates.test.js +2 -2
  13. package/dist/__tests__/canon-status-lifecycle.test.js +481 -0
  14. package/dist/__tests__/commit-gate-decision.test.js +65 -0
  15. package/dist/__tests__/commit-prototype-pollution.test.js +1 -1
  16. package/dist/__tests__/excitement-type-annotation.test.js +240 -0
  17. package/dist/__tests__/excitement-type.test.js +21 -0
  18. package/dist/__tests__/gate-decision.test.js +62 -15
  19. package/dist/__tests__/genre-excitement-mapping.test.js +355 -0
  20. package/dist/__tests__/golden-chapter-gates.test.js +79 -0
  21. package/dist/__tests__/golden-chapter-mini-planning.test.js +485 -0
  22. package/dist/__tests__/helpers/quickstart-mini-planning.js +61 -0
  23. package/dist/__tests__/init.test.js +57 -5
  24. package/dist/__tests__/instructions-platform-expansion.test.js +125 -0
  25. package/dist/__tests__/next-step-gate-decision-routing.test.js +98 -0
  26. package/dist/__tests__/orchestrator-state-write-path.test.js +1 -1
  27. package/dist/__tests__/platform-profile.test.js +57 -1
  28. package/dist/__tests__/quickstart-pipeline.test.js +73 -6
  29. package/dist/__tests__/scoring-weights.test.js +193 -0
  30. package/dist/__tests__/steps-id.test.js +2 -0
  31. package/dist/__tests__/validate-quickstart-prereqs.test.js +2 -0
  32. package/dist/advance.js +27 -2
  33. package/dist/anti-ai-context.js +535 -0
  34. package/dist/cli.js +3 -1
  35. package/dist/commit.js +22 -0
  36. package/dist/excitement-type.js +12 -0
  37. package/dist/gate-decision.js +98 -2
  38. package/dist/golden-chapter-gates.js +143 -0
  39. package/dist/init.js +76 -7
  40. package/dist/instructions.js +552 -6
  41. package/dist/next-step.js +124 -88
  42. package/dist/platform-profile.js +20 -8
  43. package/dist/quickstart-mini-planning.js +30 -0
  44. package/dist/scoring-weights.js +38 -3
  45. package/dist/steps.js +1 -1
  46. package/dist/validate.js +293 -214
  47. package/dist/volume-commit.js +271 -5
  48. package/dist/volume-planning.js +78 -3
  49. package/docs/user/README.md +1 -0
  50. package/docs/user/migration-guide.md +166 -0
  51. package/docs/user/novel-cli.md +4 -3
  52. package/docs/user/quick-start.md +354 -57
  53. package/package.json +1 -1
  54. package/schemas/platform-profile.schema.json +2 -2
  55. package/scripts/lint-blacklist.sh +221 -76
  56. package/scripts/lint-structural.sh +538 -0
  57. package/skills/continue/SKILL.md +6 -0
  58. package/skills/continue/references/context-contracts.md +71 -6
  59. package/skills/continue/references/periodic-maintenance.md +12 -1
  60. package/skills/novel-writing/references/quality-rubric.md +79 -26
  61. package/skills/novel-writing/references/style-guide.md +129 -19
  62. package/skills/start/SKILL.md +23 -3
  63. package/skills/start/references/vol-planning.md +12 -3
  64. package/templates/ai-blacklist.json +1024 -246
  65. package/templates/ai-sentence-patterns.json +167 -0
  66. package/templates/genre-excitement-map.json +48 -0
  67. package/templates/genre-golden-standards.json +80 -0
  68. package/templates/genre-weight-profiles.json +15 -0
  69. package/templates/golden-chapter-gates.json +230 -0
  70. package/templates/novel-ask/example.question.json +3 -2
  71. package/templates/platform-profile.json +141 -1
  72. package/templates/platforms/fanqie.md +35 -0
  73. package/templates/platforms/jinjiang.md +35 -0
  74. package/templates/platforms/qidian.md +35 -0
  75. package/templates/style-profile-template.json +3 -0
@@ -12,14 +12,6 @@
12
12
  # 0 = success (valid JSON emitted to stdout)
13
13
  # 1 = validation failure (bad args, missing files, invalid JSON/schema)
14
14
  # 2 = script exception (unexpected runtime error)
15
- #
16
- # Notes:
17
- # - Treats optional whitelist/exemptions as "do not count as hits":
18
- # - ai-blacklist.json.whitelist (list[str])
19
- # - ai-blacklist.json.whitelist.words (list[str])
20
- # - ai-blacklist.json.exemptions.words (list[str])
21
- #
22
- # - Hit rate is computed as "hits per 1000 non-whitespace characters" (次/千字).
23
15
 
24
16
  set -euo pipefail
25
17
 
@@ -50,7 +42,17 @@ python3 - "$chapter_path" "$blacklist_path" <<'PY'
50
42
  import json
51
43
  import re
52
44
  import sys
53
- from typing import Any, Dict, List, Set
45
+ from dataclasses import dataclass
46
+ from typing import Any, Dict, List, Optional, Set, Tuple
47
+
48
+
49
@dataclass
class WordEntry:
    """A single blacklist term together with its optional per-word metadata."""

    # The literal phrase searched for in the chapter text.
    word: str
    # Category key from ai-blacklist.json "categories"; None for flat "words" entries.
    category: Optional[str]
    # Replacement suggestion propagated into the lint report when the word is hit.
    replacement_hint: Optional[str]
    # Allowed occurrences per chapter; exceeding it produces a warning. None = no limit.
    per_chapter_max: Optional[int]
    # Matching context from category metadata; "narration_only" skips hits
    # that fall inside “…” dialogue ranges.
    context: Optional[str]
54
56
 
55
57
 
56
58
  def _die(msg: str, exit_code: int = 1) -> None:
@@ -60,15 +62,13 @@ def _die(msg: str, exit_code: int = 1) -> None:
60
62
 
61
63
  def _load_json(path: str) -> Any:
62
64
  try:
63
- with open(path, "r", encoding="utf-8") as f:
64
- return json.load(f)
65
- except Exception as e:
66
- _die(f"lint-blacklist.sh: invalid JSON at {path}: {e}", 1)
65
+ with open(path, "r", encoding="utf-8-sig") as handle:
66
+ return json.load(handle)
67
+ except Exception as exc:
68
+ _die(f"lint-blacklist.sh: invalid JSON at {path}: {exc}", 1)
67
69
 
68
70
 
69
71
  def _as_str_list(value: Any) -> List[str]:
70
- if value is None:
71
- return []
72
72
  if not isinstance(value, list):
73
73
  return []
74
74
  out: List[str] = []
@@ -80,7 +80,6 @@ def _as_str_list(value: Any) -> List[str]:
80
80
 
81
81
  def _get_whitelist_words(blacklist: Dict[str, Any]) -> Set[str]:
82
82
  words: List[str] = []
83
-
84
83
  whitelist = blacklist.get("whitelist")
85
84
  if isinstance(whitelist, list):
86
85
  words.extend(_as_str_list(whitelist))
@@ -94,15 +93,114 @@ def _get_whitelist_words(blacklist: Dict[str, Any]) -> Set[str]:
94
93
  return set(words)
95
94
 
96
95
 
97
- def _unique_preserve_order(items: List[str]) -> List[str]:
98
- seen: Set[str] = set()
99
- out: List[str] = []
100
- for item in items:
101
- if item in seen:
102
- continue
103
- seen.add(item)
104
- out.append(item)
105
- return out
96
def _collect_entries(blacklist: Dict[str, Any], whitelist: Set[str]) -> List[WordEntry]:
    """Merge flat ``words`` and per-category entries into a deduplicated list.

    Empty and whitelisted words are dropped. When the same word appears in
    several places, a categorized entry replaces a flat one (it carries richer
    metadata); otherwise existing metadata wins and gaps are filled from the
    duplicate. The result is sorted longest-word-first (ties alphabetical) so
    longer phrases are matched and masked before their substrings.
    """
    raw_metadata = blacklist.get("category_metadata")
    category_metadata: Dict[str, Any] = raw_metadata if isinstance(raw_metadata, dict) else {}
    entries_by_word: Dict[str, WordEntry] = {}

    def register(entry: WordEntry) -> None:
        # Skip empty words and explicitly whitelisted terms.
        if not entry.word or entry.word in whitelist:
            return
        existing = entries_by_word.get(entry.word)
        if existing is None:
            entries_by_word[entry.word] = entry
            return
        # Prefer categorized entries because they carry richer metadata.
        if existing.category is None and entry.category is not None:
            entries_by_word[entry.word] = entry
            return
        # Otherwise merge: keep existing values, fill gaps from the newcomer.
        entries_by_word[entry.word] = WordEntry(
            word=entry.word,
            category=existing.category or entry.category,
            replacement_hint=existing.replacement_hint or entry.replacement_hint,
            per_chapter_max=existing.per_chapter_max if existing.per_chapter_max is not None else entry.per_chapter_max,
            context=existing.context or entry.context,
        )

    # Flat "words" list. Strip whitespace so padded entries still match the
    # chapter text — consistent with the stripping applied to categorized
    # string entries below.
    for word in _as_str_list(blacklist.get("words")):
        word = word.strip()
        if not word:
            continue
        register(WordEntry(word=word, category=None, replacement_hint=None, per_chapter_max=None, context=None))

    categories = blacklist.get("categories")
    if isinstance(categories, dict):
        for category, raw_items in categories.items():
            metadata = category_metadata.get(category)
            context = metadata.get("context") if isinstance(metadata, dict) and isinstance(metadata.get("context"), str) else None
            if not isinstance(raw_items, list):
                continue
            for raw_item in raw_items:
                # Plain-string entry: just a word with category context.
                if isinstance(raw_item, str):
                    word = raw_item.strip()
                    if word:
                        register(WordEntry(word=word, category=category, replacement_hint=None, per_chapter_max=None, context=context))
                    continue
                if not isinstance(raw_item, dict):
                    continue
                word = raw_item.get("word")
                if not isinstance(word, str) or not word.strip():
                    continue
                per_chapter_max = raw_item.get("per_chapter_max")
                # bool is an int subclass; True/False must not be read as limits 1/0.
                if isinstance(per_chapter_max, bool) or not isinstance(per_chapter_max, int) or per_chapter_max < 0:
                    per_chapter_max = None
                raw_hint = raw_item.get("replacement_hint")
                replacement_hint = raw_hint if isinstance(raw_hint, str) else None
                register(
                    WordEntry(
                        word=word.strip(),
                        category=category,
                        replacement_hint=replacement_hint,
                        per_chapter_max=per_chapter_max,
                        context=context,
                    )
                )

    # Longest-first so the caller masks longer phrases before substrings.
    return sorted(entries_by_word.values(), key=lambda item: (-len(item.word), item.word))
161
+
162
+
163
+ def _line_number_at(text: str, index: int) -> int:
164
+ return text.count("\n", 0, index) + 1
165
+
166
+
167
+ def _line_snippet(text: str, index: int) -> str:
168
+ start = text.rfind("\n", 0, index) + 1
169
+ end = text.find("\n", index)
170
+ if end < 0:
171
+ end = len(text)
172
+ snippet = text[start:end].strip()
173
+ return f"{snippet[:160]}…" if len(snippet) > 160 else snippet
174
+
175
+
176
+ def _build_dialogue_ranges(text: str) -> Tuple[List[Tuple[int, int]], int, int]:
177
+ ranges: List[Tuple[int, int]] = []
178
+ in_dialogue = False
179
+ start = -1
180
+ open_count = 0
181
+ close_count = 0
182
+ for index, char in enumerate(text):
183
+ if char == "“":
184
+ open_count += 1
185
+ if not in_dialogue:
186
+ in_dialogue = True
187
+ start = index
188
+ elif char == "”":
189
+ close_count += 1
190
+ if in_dialogue:
191
+ ranges.append((start, index + 1))
192
+ in_dialogue = False
193
+ start = -1
194
+ if in_dialogue and start >= 0:
195
+ ranges.append((start, len(text)))
196
+ return ranges, open_count, close_count
197
+
198
+
199
+ def _in_dialogue(index: int, ranges: List[Tuple[int, int]]) -> bool:
200
+ for start, end in ranges:
201
+ if start <= index < end:
202
+ return True
203
+ return False
106
204
 
107
205
 
108
206
  def main() -> None:
@@ -113,89 +211,136 @@ def main() -> None:
113
211
  if not isinstance(blacklist, dict):
114
212
  _die("lint-blacklist.sh: ai-blacklist.json must be a JSON object", 1)
115
213
 
116
- words = blacklist.get("words")
117
- if not isinstance(words, list) or not all(isinstance(w, str) for w in words):
118
- _die("lint-blacklist.sh: ai-blacklist.json.words must be a list of strings", 1)
119
-
120
214
  whitelist = _get_whitelist_words(blacklist)
121
-
122
- effective_words = [w.strip() for w in words if isinstance(w, str) and w.strip() and w.strip() not in whitelist]
123
- effective_words = list(dict.fromkeys(effective_words)) # dedup preserving order
124
-
125
- # Sort by length descending to match longest phrases first
126
- effective_words.sort(key=lambda w: -len(w))
215
+ entries = _collect_entries(blacklist, whitelist)
127
216
 
128
217
  try:
129
- with open(chapter_path, "r", encoding="utf-8") as f:
130
- text = f.read()
131
- except Exception as e:
132
- _die(f"lint-blacklist.sh: failed to read chapter: {e}", 1)
218
+ with open(chapter_path, "r", encoding="utf-8-sig") as handle:
219
+ text = handle.read().replace("\r\n", "\n").replace("\r", "\n")
220
+ except Exception as exc:
221
+ _die(f"lint-blacklist.sh: failed to read chapter: {exc}", 1)
133
222
 
134
- lines = text.splitlines()
223
+ dialogue_ranges, open_count, close_count = _build_dialogue_ranges(text)
135
224
  non_ws_chars = len(re.sub(r"\s+", "", text))
136
225
 
137
- # Use a working copy for masking matched phrases
138
- masked_text = text
226
+ warnings: List[Dict[str, Any]] = []
227
+ total_quotes = open_count + close_count
228
+ if total_quotes % 2 != 0 or open_count != close_count:
229
+ warnings.append(
230
+ {
231
+ "code": "quote_parity_mismatch",
232
+ "message": f"Chinese quote parity mismatch: “={open_count}, ”={close_count}, total={total_quotes}.",
233
+ }
234
+ )
139
235
 
236
+ masked = text
140
237
  hits: List[Dict[str, Any]] = []
141
238
  total_hits = 0
142
-
143
- for word in effective_words:
144
- count = masked_text.count(word)
145
- if count <= 0:
239
+ narration_connector_count = 0
240
+ per_limit_hits: List[Dict[str, Any]] = []
241
+
242
+ for entry in entries:
243
+ occurrences: List[int] = []
244
+ search_from = 0
245
+ while True:
246
+ index = masked.find(entry.word, search_from)
247
+ if index < 0:
248
+ break
249
+ search_from = index + len(entry.word)
250
+ in_dialogue = _in_dialogue(index, dialogue_ranges)
251
+ if entry.context == "narration_only" and in_dialogue:
252
+ continue
253
+ occurrences.append(index)
254
+ masked = masked[:index] + ("\x00" * len(entry.word)) + masked[index + len(entry.word):]
255
+
256
+ count = len(occurrences)
257
+ if count == 0:
146
258
  continue
259
+
147
260
  total_hits += count
261
+ if entry.category == "narration_connector":
262
+ narration_connector_count += count
148
263
 
149
- # Collect line numbers and snippets from ORIGINAL text
150
- line_numbers: List[int] = []
264
+ lines: List[int] = []
151
265
  snippets: List[str] = []
152
- for idx, line in enumerate(lines, start=1):
153
- if word in line:
154
- line_numbers.append(idx)
155
- if len(snippets) < 5:
156
- snippet = line.strip()
157
- if len(snippet) > 160:
158
- snippet = snippet[:160] + "…"
159
- snippets.append(snippet)
160
-
161
- hits.append(
162
- {
163
- "word": word,
164
- "count": count,
165
- "lines": line_numbers[:20],
166
- "snippets": snippets,
167
- }
168
- )
169
-
170
- # Mask matched word in working copy to prevent substring double-counting
171
- masked_text = masked_text.replace(word, "\x00" * len(word))
172
-
173
- hits.sort(key=lambda x: (-int(x["count"]), str(x["word"])))
266
+ contexts: List[str] = []
267
+ for index in occurrences:
268
+ lines.append(_line_number_at(text, index))
269
+ if len(snippets) < 5:
270
+ snippets.append(_line_snippet(text, index))
271
+ contexts.append("dialogue" if _in_dialogue(index, dialogue_ranges) else "narration")
272
+
273
+ hit_obj: Dict[str, Any] = {
274
+ "word": entry.word,
275
+ "count": count,
276
+ "lines": lines[:20],
277
+ "snippets": snippets,
278
+ "contexts": sorted(set(contexts)),
279
+ }
280
+ if entry.category is not None:
281
+ hit_obj["category"] = entry.category
282
+ if entry.replacement_hint:
283
+ hit_obj["replacement_hint"] = entry.replacement_hint
284
+ if entry.per_chapter_max is not None:
285
+ hit_obj["per_chapter_max"] = entry.per_chapter_max
286
+ if count > entry.per_chapter_max:
287
+ per_limit_hits.append(
288
+ {
289
+ "word": entry.word,
290
+ "count": count,
291
+ "per_chapter_max": entry.per_chapter_max,
292
+ "category": entry.category,
293
+ "replacement_hint": entry.replacement_hint,
294
+ }
295
+ )
296
+ warnings.append(
297
+ {
298
+ "code": "per_chapter_max_exceeded",
299
+ "message": f"{entry.word} appeared {count} times (limit {entry.per_chapter_max}).",
300
+ "word": entry.word,
301
+ "count": count,
302
+ "per_chapter_max": entry.per_chapter_max,
303
+ }
304
+ )
305
+ hits.append(hit_obj)
306
+
307
+ hits.sort(key=lambda item: (-int(item["count"]), str(item["word"])))
308
+ per_limit_hits.sort(key=lambda item: (-int(item["count"]), str(item["word"])))
174
309
 
175
310
  hits_per_kchars = 0.0
176
311
  if non_ws_chars > 0:
177
312
  hits_per_kchars = total_hits / (non_ws_chars / 1000.0)
178
313
 
179
- out: Dict[str, Any] = {
314
+ unique_words_count = len(entries)
315
+ words_flat_count = len(_as_str_list(blacklist.get("words")))
316
+
317
+ output: Dict[str, Any] = {
180
318
  "chapter_path": chapter_path,
181
319
  "blacklist_path": blacklist_path,
182
320
  "chars": non_ws_chars,
183
- "blacklist_words_count": len(words),
321
+ "blacklist_words_count": unique_words_count,
322
+ "flat_words_count": words_flat_count,
184
323
  "whitelist_words_count": len(whitelist),
185
- "effective_words_count": len(effective_words),
324
+ "effective_words_count": unique_words_count,
186
325
  "total_hits": total_hits,
187
326
  "hits_per_kchars": round(hits_per_kchars, 3),
188
327
  "hits": hits,
328
+ "warnings": warnings,
329
+ "per_chapter_limit_hits": per_limit_hits,
330
+ "statistical_profile": {
331
+ "blacklist_hit_rate": round(hits_per_kchars, 3),
332
+ "narration_connector_count": narration_connector_count,
333
+ },
189
334
  }
190
335
 
191
- sys.stdout.write(json.dumps(out, ensure_ascii=False) + "\n")
336
+ sys.stdout.write(json.dumps(output, ensure_ascii=False) + "\n")
192
337
 
193
338
 
194
339
  try:
195
340
  main()
196
341
  except SystemExit:
197
342
  raise
198
- except Exception as e:
199
- sys.stderr.write(f"lint-blacklist.sh: unexpected error: {e}\n")
343
+ except Exception as exc:
344
+ sys.stderr.write(f"lint-blacklist.sh: unexpected error: {exc}\n")
200
345
  raise SystemExit(2)
201
346
  PY