novel-writer-cli 0.3.0 → 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/agents/chapter-writer.md +43 -14
- package/agents/character-weaver.md +7 -1
- package/agents/plot-architect.md +20 -7
- package/agents/quality-judge.md +199 -20
- package/agents/style-analyzer.md +14 -8
- package/agents/style-refiner.md +10 -3
- package/agents/world-builder.md +8 -1
- package/dist/__tests__/agent-prompts-anti-ai-upgrade.test.js +194 -6
- package/dist/__tests__/agent-prompts-platform-expansion.test.js +33 -0
- package/dist/__tests__/anti-ai-infrastructure.test.js +548 -0
- package/dist/__tests__/anti-ai-templates.test.js +2 -2
- package/dist/__tests__/canon-status-lifecycle.test.js +481 -0
- package/dist/__tests__/commit-gate-decision.test.js +65 -0
- package/dist/__tests__/commit-prototype-pollution.test.js +1 -1
- package/dist/__tests__/excitement-type-annotation.test.js +240 -0
- package/dist/__tests__/excitement-type.test.js +21 -0
- package/dist/__tests__/gate-decision.test.js +62 -15
- package/dist/__tests__/genre-excitement-mapping.test.js +355 -0
- package/dist/__tests__/golden-chapter-gates.test.js +79 -0
- package/dist/__tests__/golden-chapter-mini-planning.test.js +485 -0
- package/dist/__tests__/helpers/quickstart-mini-planning.js +61 -0
- package/dist/__tests__/init.test.js +57 -5
- package/dist/__tests__/instructions-platform-expansion.test.js +125 -0
- package/dist/__tests__/next-step-gate-decision-routing.test.js +98 -0
- package/dist/__tests__/orchestrator-state-write-path.test.js +1 -1
- package/dist/__tests__/platform-profile.test.js +57 -1
- package/dist/__tests__/quickstart-pipeline.test.js +73 -6
- package/dist/__tests__/scoring-weights.test.js +193 -0
- package/dist/__tests__/steps-id.test.js +2 -0
- package/dist/__tests__/validate-quickstart-prereqs.test.js +2 -0
- package/dist/advance.js +27 -2
- package/dist/anti-ai-context.js +535 -0
- package/dist/cli.js +3 -1
- package/dist/commit.js +22 -0
- package/dist/excitement-type.js +12 -0
- package/dist/gate-decision.js +98 -2
- package/dist/golden-chapter-gates.js +143 -0
- package/dist/init.js +76 -7
- package/dist/instructions.js +552 -6
- package/dist/next-step.js +124 -88
- package/dist/platform-profile.js +20 -8
- package/dist/quickstart-mini-planning.js +30 -0
- package/dist/scoring-weights.js +38 -3
- package/dist/steps.js +1 -1
- package/dist/validate.js +293 -214
- package/dist/volume-commit.js +271 -5
- package/dist/volume-planning.js +78 -3
- package/docs/user/README.md +1 -0
- package/docs/user/migration-guide.md +166 -0
- package/docs/user/novel-cli.md +4 -3
- package/docs/user/quick-start.md +354 -57
- package/package.json +1 -1
- package/schemas/platform-profile.schema.json +2 -2
- package/scripts/lint-blacklist.sh +221 -76
- package/scripts/lint-structural.sh +538 -0
- package/skills/continue/SKILL.md +6 -0
- package/skills/continue/references/context-contracts.md +71 -6
- package/skills/continue/references/periodic-maintenance.md +12 -1
- package/skills/novel-writing/references/quality-rubric.md +79 -26
- package/skills/novel-writing/references/style-guide.md +129 -19
- package/skills/start/SKILL.md +23 -3
- package/skills/start/references/vol-planning.md +12 -3
- package/templates/ai-blacklist.json +1024 -246
- package/templates/ai-sentence-patterns.json +167 -0
- package/templates/genre-excitement-map.json +48 -0
- package/templates/genre-golden-standards.json +80 -0
- package/templates/genre-weight-profiles.json +15 -0
- package/templates/golden-chapter-gates.json +230 -0
- package/templates/novel-ask/example.question.json +3 -2
- package/templates/platform-profile.json +141 -1
- package/templates/platforms/fanqie.md +35 -0
- package/templates/platforms/jinjiang.md +35 -0
- package/templates/platforms/qidian.md +35 -0
- package/templates/style-profile-template.json +3 -0
package/scripts/lint-structural.sh
ADDED
@@ -0,0 +1,538 @@
+#!/usr/bin/env bash
+#
+# Deterministic structural anti-AI linter (issue 139).
+#
+# Usage:
+#   lint-structural.sh <chapter.md> [--genre <genre>] [--config <override.json>]
+#
+# Output:
+#   stdout JSON (exit 0 on success)
+
+set -euo pipefail
+
+if [ "$#" -lt 1 ]; then
+  echo "Usage: lint-structural.sh <chapter.md> [--genre <genre>] [--config <override.json>]" >&2
+  exit 1
+fi
+
+if ! command -v python3 >/dev/null 2>&1; then
+  echo "lint-structural.sh: python3 is required but not found" >&2
+  exit 2
+fi
+
+python3 - "$@" <<'PY'
+import json
+import re
+import sys
+from datetime import datetime, timezone
+from typing import Any, Dict, List, Optional, Tuple
+
+GENRE_ALIASES = {
+    "玄幻": "xuanhuan",
+    "xuanhuan": "xuanhuan",
+    "都市": "dushi",
+    "dushi": "dushi",
+    "历史": "history",
+    "history": "history",
+    "科幻": "scifi",
+    "sci-fi": "scifi",
+    "scifi": "scifi",
+    "science-fiction": "scifi",
+    "悬疑": "suspense",
+    "mystery": "suspense",
+    "suspense": "suspense",
+    "恐怖": "horror",
+    "horror": "horror",
+    "言情": "romance",
+    "romance": "romance",
+}
+
+DEFAULT_THRESHOLDS: Dict[str, Any] = {
+    "l2": {
+        "window_chars": 300,
+        "emphasis_words": ["极其", "非常", "十分", "无比"],
+        "emphasis_max": 2,
+        "adjective_max": 6,
+        "adjective_words": [
+            "冰冷", "漆黑", "沉重", "压抑", "潮湿", "苍白", "急促", "阴冷", "炽热", "巨大",
+            "明亮", "耀眼", "温柔", "优雅", "柔弱", "安静", "模糊", "疲惫", "猩红", "刺骨",
+        ],
+    },
+    "l3": {
+        "window_chars": 500,
+        "idiom_max": 3,
+        "paragraph_max": 2,
+        "idioms": [
+            "心潮澎湃", "热血沸腾", "激动万分", "波澜壮阔", "惊心动魄", "风起云涌", "不寒而栗", "毛骨悚然",
+            "心有余悸", "心惊肉跳", "怒火中烧", "屏住呼吸", "若无其事", "若有所思", "目不转睛", "跌宕起伏",
+        ],
+    },
+    "l5": {
+        "single_sentence_ratio": [0.25, 0.45],
+        "paragraph_char_max": 100,
+        "similar_paragraph_delta": 10,
+        "similar_paragraph_run": 3,
+    },
+    "l6": {
+        "ellipsis_per_paragraph_max": 1,
+        "ellipsis_per_chapter_max": 5,
+        "exclamation_per_paragraph_max": 1,
+        "exclamation_per_chapter_max": 8,
+        "em_dash_per_chapter_max": 0,
+    },
+}
+
+GENRE_OVERRIDES: Dict[str, Dict[str, Any]] = {
+    "xuanhuan": {},
+    "dushi": {},
+    "history": {},
+    "scifi": {
+        "l5": {"single_sentence_ratio": [0.15, 0.30], "paragraph_char_max": 120},
+        "l6": {"exclamation_per_chapter_max": 5},
+    },
+    "suspense": {
+        "l5": {"single_sentence_ratio": [0.20, 0.35], "paragraph_char_max": 100},
+        "l6": {"ellipsis_per_chapter_max": 8},
+    },
+    "horror": {
+        "l5": {"single_sentence_ratio": [0.30, 0.50]},
+        "l6": {"ellipsis_per_chapter_max": 8},
+    },
+    "romance": {},
+}
+
+
+def die(message: str, code: int = 1) -> None:
+    sys.stderr.write(message.rstrip() + "\n")
+    raise SystemExit(code)
+
+
+def deep_merge(base: Dict[str, Any], update: Dict[str, Any]) -> Dict[str, Any]:
+    out = json.loads(json.dumps(base, ensure_ascii=False))
+    for key, value in update.items():
+        if isinstance(value, dict) and isinstance(out.get(key), dict):
+            out[key] = deep_merge(out[key], value)
+        else:
+            out[key] = value
+    return out
+
+
+def normalize_genre(raw: str) -> Optional[str]:
+    if not raw:
+        return None
+    value = raw.strip()
+    return GENRE_ALIASES.get(value) or GENRE_ALIASES.get(value.lower())
+
+
+def parse_args(argv: List[str]) -> Tuple[str, Optional[str], Optional[str]]:
+    chapter_path = argv[1]
+    genre = None
+    config_path = None
+    index = 2
+    while index < len(argv):
+        token = argv[index]
+        if token == "--genre":
+            index += 1
+            if index >= len(argv):
+                die("lint-structural.sh: --genre requires a value", 1)
+            genre = normalize_genre(argv[index])
+            if genre is None:
+                die(f"lint-structural.sh: unsupported genre override '{argv[index]}'", 1)
+        elif token == "--config":
+            index += 1
+            if index >= len(argv):
+                die("lint-structural.sh: --config requires a value", 1)
+            config_path = argv[index]
+        else:
+            die(f"lint-structural.sh: unknown argument '{token}'", 1)
+        index += 1
+    return chapter_path, genre, config_path
+
+
+def load_text(path: str) -> str:
+    try:
+        with open(path, "r", encoding="utf-8-sig") as handle:
+            return handle.read().replace("\r\n", "\n").replace("\r", "\n")
+    except FileNotFoundError:
+        die(f"lint-structural.sh: chapter file not found: {path}", 1)
+    except Exception as exc:
+        die(f"lint-structural.sh: failed to read chapter: {exc}", 1)
+
+
+def load_config(path: str) -> Dict[str, Any]:
+    try:
+        with open(path, "r", encoding="utf-8-sig") as handle:
+            data = json.load(handle)
+    except FileNotFoundError:
+        die(f"lint-structural.sh: config file not found: {path}", 1)
+    except Exception as exc:
+        die(f"lint-structural.sh: invalid JSON at {path}: {exc}", 1)
+    if not isinstance(data, dict):
+        die("lint-structural.sh: config JSON must be an object", 1)
+    return data
+
+
+def line_of(text: str, index: int) -> int:
+    return text.count("\n", 0, index) + 1
+
+
+def compact_chars(text: str) -> int:
+    return len(re.sub(r"\s+", "", text))
+
+
+def split_paragraphs(text: str) -> List[Tuple[int, int, int, str]]:
+    lines = text.replace("\r", "").split("\n")
+    paragraphs: List[Tuple[int, int, int, str]] = []
+    buffer: List[str] = []
+    start_line = 1
+    char_offset = 0
+    start_char = 0
+    for line_no, line in enumerate(lines, start=1):
+        raw_line = line
+        line = line.strip()
+        line_len = len(raw_line) + 1
+        if not line:
+            if buffer:
+                paragraph = "\n".join(buffer).strip()
+                if paragraph and not paragraph.startswith("#"):
+                    paragraphs.append((start_line, start_char, start_char + len(paragraph), paragraph))
+                buffer = []
+            char_offset += line_len
+            start_line = line_no + 1
+            start_char = char_offset
+            continue
+        if not buffer:
+            start_line = line_no
+            start_char = char_offset
+        buffer.append(raw_line)
+        char_offset += line_len
+    if buffer:
+        paragraph = "\n".join(buffer).strip()
+        if paragraph and not paragraph.startswith("#"):
+            paragraphs.append((start_line, start_char, start_char + len(paragraph), paragraph))
+    return paragraphs
+
+
+def sentence_count(paragraph: str) -> int:
+    parts = [part for part in re.split(r"[。!?!?]+(?:[”」』])*", paragraph) if part.strip()]
+    return max(len(parts), 1)
+
+
+def chunks(compact_text: str, size: int) -> List[Tuple[int, str]]:
+    return [(idx, compact_text[idx:idx + size]) for idx in range(0, len(compact_text), size) if compact_text[idx:idx + size]]
+
+
+def build_compact_text_and_map(text: str) -> Tuple[str, List[int]]:
+    compact_chars_list: List[str] = []
+    compact_index_map: List[int] = []
+    for index, char in enumerate(text):
+        if char.isspace():
+            continue
+        compact_chars_list.append(char)
+        compact_index_map.append(index)
+    return "".join(compact_chars_list), compact_index_map
+
+
+def compact_range_to_original(compact_index_map: List[int], start: int, end: int) -> Tuple[int, int]:
+    if not compact_index_map:
+        return 0, 0
+    safe_start = max(0, min(start, len(compact_index_map) - 1))
+    safe_end = max(safe_start, min(max(end - 1, start), len(compact_index_map) - 1))
+    return compact_index_map[safe_start], compact_index_map[safe_end] + 1
+
+
+def add_compact_violation(
+    violations: List[Dict[str, Any]],
+    text: str,
+    compact_index_map: List[int],
+    rule_id: str,
+    severity: str,
+    compact_start: int,
+    compact_end: int,
+    description: str,
+    suggestion: str,
+) -> None:
+    char_start, char_end = compact_range_to_original(compact_index_map, compact_start, compact_end)
+    add_violation(
+        violations,
+        rule_id,
+        severity,
+        line_of(text, char_start),
+        char_start,
+        char_end,
+        description,
+        suggestion,
+    )
+
+
+def add_violation(violations: List[Dict[str, Any]], rule_id: str, severity: str, line: int, char_start: int, char_end: int, description: str, suggestion: str) -> None:
+    violations.append({
+        "rule_id": rule_id,
+        "severity": severity,
+        "location": {"line": max(line, 1), "char_start": max(char_start, 0), "char_end": max(char_end, 0)},
+        "description": description,
+        "suggestion": suggestion,
+    })
+
+
+def main() -> None:
+    chapter_path, genre, config_path = parse_args(sys.argv)
+    thresholds = deep_merge(DEFAULT_THRESHOLDS, GENRE_OVERRIDES.get(genre or "", {}))
+    if config_path:
+        config = load_config(config_path)
+        thresholds = deep_merge(thresholds, config.get("thresholds", config))
+
+    text = load_text(chapter_path)
+    compact_text, compact_index_map = build_compact_text_and_map(text)
+    paragraphs = split_paragraphs(text)
+    violations: List[Dict[str, Any]] = []
+
+    # L2 adjective/adverb density
+    for offset, chunk_text in chunks(compact_text, int(thresholds["l2"]["window_chars"])):
+        emphasis = sum(chunk_text.count(word) for word in thresholds["l2"]["emphasis_words"])
+        adjectives = sum(chunk_text.count(word) for word in thresholds["l2"]["adjective_words"])
+        if emphasis > int(thresholds["l2"]["emphasis_max"]):
+            add_compact_violation(
+                violations,
+                text,
+                compact_index_map,
+                "L2.emphasis_density",
+                "warning",
+                offset,
+                offset + len(chunk_text),
+                f"300 字窗口内强调词 {emphasis} 个,超过上限 {thresholds['l2']['emphasis_max']}。",
+                "删掉抽象加强词,改为具体动作、程度或感官反馈。",
+            )
+        if adjectives > int(thresholds["l2"]["adjective_max"]):
+            add_compact_violation(
+                violations,
+                text,
+                compact_index_map,
+                "L2.adjective_density",
+                "warning",
+                offset,
+                offset + len(chunk_text),
+                f"300 字窗口内描述性修饰词命中 {adjectives} 次,超过上限 {thresholds['l2']['adjective_max']}。",
+                "减少形容词/副词堆叠,把信息拆到动作和状态变化里。",
+            )
+
+    adjective_pattern = "|".join(sorted(thresholds["l2"]["adjective_words"], key=len, reverse=True))
+    if adjective_pattern:
+        for match in re.finditer(rf"(?:{adjective_pattern}){{2,}}的", compact_text):
+            add_compact_violation(
+                violations,
+                text,
+                compact_index_map,
+                "L2.consecutive_adjectives",
+                "error",
+                match.start(),
+                match.end(),
+                "检测到连续两个以上形容词修饰同一名词。",
+                "保留最有力的 1 个修饰词,其余改写成动作或结果。",
+            )
+    for match in re.finditer(r"[\u4e00-\u9fff]{1,6}的[\u4e00-\u9fff]{1,6}的[\u4e00-\u9fff]{1,6}", compact_text):
+        add_compact_violation(
+            violations,
+            text,
+            compact_index_map,
+            "L2.de_chain",
+            "warning",
+            match.start(),
+            match.end(),
+            "检测到连续三段“的”字链。",
+            "拆句或改写结构,避免“XX的XX的XX”连续套接。",
+        )
+
+    # L3 four-character idiom density
+    idioms = thresholds["l3"]["idioms"]
+    for offset, chunk_text in chunks(compact_text, int(thresholds["l3"]["window_chars"])):
+        idiom_count = sum(chunk_text.count(idiom) for idiom in idioms)
+        if idiom_count > int(thresholds["l3"]["idiom_max"]):
+            add_compact_violation(
+                violations,
+                text,
+                compact_index_map,
+                "L3.idiom_density",
+                "warning",
+                offset,
+                offset + len(chunk_text),
+                f"500 字窗口内四字词组命中 {idiom_count} 个,超过上限 {thresholds['l3']['idiom_max']}。",
+                "减少套话式四字词组,优先保留 1 个最有力的表达。",
+            )
+    for index, (line_no, start_char, end_char, paragraph) in enumerate(paragraphs, start=1):
+        paragraph_count = sum(paragraph.count(idiom) for idiom in idioms)
+        if paragraph_count > int(thresholds["l3"]["paragraph_max"]):
+            add_violation(
+                violations,
+                "L3.idiom_paragraph_density",
+                "warning",
+                line_no,
+                start_char,
+                end_char,
+                f"第 {index} 段四字词组命中 {paragraph_count} 个,超过单段上限 {thresholds['l3']['paragraph_max']}。",
+                "把四字词组拆散到动作、对白或感官细节里。",
+            )
+        if re.search(r"(?:" + "|".join(idioms) + r")(?:、|,|,)+(?:" + "|".join(idioms) + r")", paragraph):
+            add_violation(
+                violations,
+                "L3.idiom_chain",
+                "error",
+                line_no,
+                start_char,
+                end_char,
+                "检测到四字词组连续并列使用。",
+                "只保留一个四字词组,其余改成具体动作、反应或后果。",
+            )
+
+    # L5 paragraph structure
+    if paragraphs:
+        ratio = sum(1 for _, _, _, paragraph in paragraphs if sentence_count(paragraph) == 1) / len(paragraphs)
+        ratio_min, ratio_max = thresholds["l5"]["single_sentence_ratio"]
+        if ratio < ratio_min or ratio > ratio_max:
+            add_violation(
+                violations,
+                "L5.single_sentence_ratio",
+                "warning",
+                paragraphs[0][0],
+                paragraphs[0][1],
+                paragraphs[-1][2],
+                f"单句段占比为 {ratio:.2%},超出目标范围 {ratio_min:.0%}-{ratio_max:.0%}。",
+                "交错安排单句段、短段和中段,避免整章呼吸节奏过于整齐。",
+            )
+    paragraph_char_max = int(thresholds["l5"]["paragraph_char_max"])
+    for index, (line_no, start_char, end_char, paragraph) in enumerate(paragraphs, start=1):
+        char_count = compact_chars(paragraph)
+        if char_count > paragraph_char_max:
+            add_violation(
+                violations,
+                "L5.paragraph_char_max",
+                "warning",
+                line_no,
+                start_char,
+                end_char,
+                f"第 {index} 段长度 {char_count} 字,超过上限 {paragraph_char_max}。",
+                "优先拆成 2 段,让信息在动作或对白处换气。",
+            )
+    run = int(thresholds["l5"].get("similar_paragraph_run", 3))
+    delta = int(thresholds["l5"]["similar_paragraph_delta"])
+    for start in range(0, max(len(paragraphs) - run + 1, 0)):
+        window = paragraphs[start:start + run]
+        lengths = [compact_chars(item[3]) for item in window]
+        if max(lengths) - min(lengths) <= delta:
+            add_violation(
+                violations,
+                "L5.similar_paragraph_lengths",
+                "warning",
+                window[0][0],
+                window[0][1],
+                window[-1][2],
+                f"连续 {run} 段长度过于接近({lengths},允许波动 ±{delta} 字)。",
+                "主动拉开长短段差异,制造“长-短-短-长”的呼吸感。",
+            )
+            break
+
+    # L6 punctuation rhythm
+    ellipsis_count = len(re.findall(r"……", text))
+    exclamation_count = text.count("!")
+    em_dash_count = len(re.findall(r"——", text))
+    if ellipsis_count > int(thresholds["l6"]["ellipsis_per_chapter_max"]):
+        add_violation(
+            violations,
+            "L6.ellipsis_per_chapter",
+            "warning",
+            1,
+            0,
+            len(text),
+            f"全章省略号 {ellipsis_count} 个,超过上限 {thresholds['l6']['ellipsis_per_chapter_max']}。",
+            "删掉情绪占位式省略号,改用停顿描写或断句。",
+        )
+    if exclamation_count > int(thresholds["l6"]["exclamation_per_chapter_max"]):
+        add_violation(
+            violations,
+            "L6.exclamation_per_chapter",
+            "warning",
+            1,
+            0,
+            len(text),
+            f"全章感叹号 {exclamation_count} 个,超过上限 {thresholds['l6']['exclamation_per_chapter_max']}。",
+            "减少感叹号,改用动作、语气和句式变化传达情绪强度。",
+        )
+    if em_dash_count > int(thresholds["l6"]["em_dash_per_chapter_max"]):
+        add_violation(
+            violations,
+            "L6.em_dash_per_chapter",
+            "warning",
+            1,
+            0,
+            len(text),
+            f"全章破折号 {em_dash_count} 个,超过上限 {thresholds['l6']['em_dash_per_chapter_max']}。",
+            "把破折号改成逗号、句号、省略号或重组句式。",
+        )
+    for index, (line_no, start_char, end_char, paragraph) in enumerate(paragraphs, start=1):
+        ellipsis = len(re.findall(r"……", paragraph))
+        exclamations = paragraph.count("!")
+        if ellipsis > int(thresholds["l6"]["ellipsis_per_paragraph_max"]):
+            add_violation(
+                violations,
+                "L6.ellipsis_per_paragraph",
+                "warning",
+                line_no,
+                start_char,
+                end_char,
+                f"第 {index} 段省略号 {ellipsis} 个,超过单段上限 {thresholds['l6']['ellipsis_per_paragraph_max']}。",
+                "同一段只保留 1 个省略号,其余改为停顿描写或断句。",
+            )
+        if exclamations > int(thresholds["l6"]["exclamation_per_paragraph_max"]):
+            add_violation(
+                violations,
+                "L6.exclamation_per_paragraph",
+                "warning",
+                line_no,
+                start_char,
+                end_char,
+                f"第 {index} 段感叹号 {exclamations} 个,超过单段上限 {thresholds['l6']['exclamation_per_paragraph_max']}。",
+                "同一段只保留 1 个感叹号,把其他强度落到动作和语气上。",
+            )
+    for pattern, rule_id, label in [
+        (r"(?:……+!|!+……)", "L6.mixed_ellipsis_exclamation", "省略号与感叹号连用"),
+        (r"??+", "L6.repeated_question_marks", "问号连用"),
+        (r"!!+", "L6.repeated_exclamation_marks", "感叹号连用"),
+    ]:
+        for match in re.finditer(pattern, text):
+            add_violation(
+                violations,
+                rule_id,
+                "error",
+                line_of(text, match.start()),
+                match.start(),
+                match.end(),
+                f"检测到 {label}。",
+                "删除重复/叠加标点,把节奏写回正文动作和句式。",
+            )
+
+    report = {
+        "schema_version": 1,
+        "generated_at": datetime.now(timezone.utc).isoformat(),
+        "chapter_path": chapter_path,
+        "genre": genre,
+        "chapter_chars": len(compact_text),
+        "thresholds": thresholds,
+        "violations": violations,
+        "summary": {
+            "warning_count": sum(1 for item in violations if item["severity"] == "warning"),
+            "error_count": sum(1 for item in violations if item["severity"] == "error"),
+            "violation_count": len(violations),
+            "total": len(violations),
+        },
+    }
+    sys.stdout.write(json.dumps(report, ensure_ascii=False) + "\n")
+
+
+try:
+    main()
+except SystemExit:
+    raise
+except Exception as exc:
+    sys.stderr.write(f"lint-structural.sh: unexpected error: {exc}\n")
+    raise SystemExit(2)
+PY
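For orientation, a minimal sketch of driving the new linter from a shell. The chapter path and `override.json` are hypothetical; the flags, the genre alias, the `{"thresholds": ...}` wrapper, and the `summary` field all follow the script above.

```bash
# Hypothetical chapter path; the JSON report is written to stdout.
bash package/scripts/lint-structural.sh chapters/ch001.md --genre 悬疑 > report.json

# --config values are deep-merged over the genre-adjusted defaults;
# both a bare object and a {"thresholds": {...}} wrapper are accepted.
cat > override.json <<'JSON'
{"thresholds": {"l6": {"exclamation_per_chapter_max": 12}}}
JSON
bash package/scripts/lint-structural.sh chapters/ch001.md --genre 悬疑 --config override.json \
  | python3 -c 'import json, sys; print(json.load(sys.stdin)["summary"])'
```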
package/skills/continue/SKILL.md
CHANGED
@@ -41,6 +41,12 @@ continue-specific rules:
 
 - When `commit --chapter X` is encountered and succeeds: `committed_chapters += 1`
 - After the commit completes, run `${NOVEL} next --json` to confirm the next step (or go straight into the next loop iteration)
+- For writing packets (especially `chapter:*:draft` / `quickstart:trial`), if `packet.manifest.paths.platform_writing_guide` exists, pass it to `chapter-writer` unchanged
+- For judging packets (especially `chapter:*:judge` / `quickstart:results`), if `chapter <= 3` and the project has a `golden-chapter-gates.json`, the CLI-generated packet may include `packet.manifest.inline.golden_chapter_gates`; do not recompute or rewrite it at the skill layer
+- For judging packets (especially `chapter:*:judge` / `quickstart:results`), if `chapter <= 3` and the genre in `brief.md` matches an entry in `genre-golden-standards.json`, the CLI may also inject `packet.manifest.inline.genre_golden_standards`; these are genre-specific `focus_dimensions / criteria / minimum_thresholds` to be evaluated on top of `golden_chapter_gates`; the skill layer only passes them through and must not fabricate or rewrite them
+- For `chapter:*:draft` / `chapter:*:judge` packets, the CLI filters context by `canon_status`: only `established` rules (or rules missing the field) go into `hard_rules_list`, while `planned` rules go into `planned_rules_info` (draft only); in draft packets, established characters go into `character_contracts` / `character_profiles` and planned characters go separately into `planned_character_contracts` / `planned_character_profiles`, whereas judge packets keep only established characters; the skill layer only passes this through and must not recompute or rewrite it
+- For `chapter:*:judge` packets, the CLI reads `chapter_contract.excitement_type` first, falls back to the optional `- **ExcitementType**:` line in `outline.md` when it is missing, and injects the result into `packet.manifest.inline.excitement_type` (missing = `null`); the skill layer only passes it through to `quality-judge` and must not recompute or rewrite it
+- If QualityJudge returns the corresponding fields because a golden-three-chapter hard gate failed, the subsequent `novel next --json` treats that as a forced-revision / manual-review signal; the skill simply follows the next step the CLI provides
 
 ### Exit conditions
 
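As a hedged illustration of the packet fields these rules refer to (not the CLI's exact output format), the snippet below reads the inline block from a judge packet saved locally as `packet.json`, a hypothetical filename; which fields are present depends on the chapter number, `golden-chapter-gates.json`, and genre matching.

```bash
python3 - <<'PY'
# Sketch only: field paths follow the SKILL.md rules above; packet.json is a
# hypothetical local copy of a chapter:*:judge packet, not a guaranteed format.
import json

with open("packet.json", "r", encoding="utf-8") as handle:
    packet = json.load(handle)

inline = packet.get("manifest", {}).get("inline", {})
print("excitement_type:", inline.get("excitement_type"))            # may be null/None
print("has golden_chapter_gates:", "golden_chapter_gates" in inline)
print("has genre_golden_standards:", "genre_golden_standards" in inline)
PY
```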