docrev 0.6.0 → 0.6.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/rev.js +78 -16
- package/lib/build.js +2 -1
- package/lib/comment-realign.js +453 -0
- package/lib/import.js +104 -14
- package/lib/wordcomments.js +466 -308
- package/package.json +1 -1
package/bin/rev.js
CHANGED
|
@@ -1525,6 +1525,7 @@ program
|
|
|
1525
1525
|
.option('--toc', 'Include table of contents')
|
|
1526
1526
|
.option('--show-changes', 'Export DOCX with visible track changes (audit mode)')
|
|
1527
1527
|
.option('--dual', 'Output both clean DOCX and DOCX with Word comments (paper.docx + paper_comments.docx)')
|
|
1528
|
+
.option('--reference <docx>', 'Reference DOCX for comment position alignment (use with --dual)')
|
|
1528
1529
|
.action(async (formats, options) => {
|
|
1529
1530
|
const dir = path.resolve(options.dir);
|
|
1530
1531
|
|
|
@@ -1568,6 +1569,11 @@ program
|
|
|
1568
1569
|
config.docx.toc = true;
|
|
1569
1570
|
}
|
|
1570
1571
|
|
|
1572
|
+
// For --dual mode, the clean DOCX should have no comments
|
|
1573
|
+
if (options.dual) {
|
|
1574
|
+
config.docx.keepComments = false;
|
|
1575
|
+
}
|
|
1576
|
+
|
|
1571
1577
|
// Handle --show-changes mode (audit export)
|
|
1572
1578
|
if (options.showChanges) {
|
|
1573
1579
|
if (!targetFormats.includes('docx') && !targetFormats.includes('all')) {
|
|
@@ -1645,28 +1651,84 @@ program
|
|
|
1645
1651
|
process.exit(1);
|
|
1646
1652
|
}
|
|
1647
1653
|
|
|
1648
|
-
// Handle --dual mode: create a second DOCX with proper Word comments
|
|
1654
|
+
// Handle --dual mode: create a second DOCX with proper Word comments using marker-based approach
|
|
1649
1655
|
if (options.dual) {
|
|
1650
1656
|
const docxResult = results.find(r => r.format === 'docx' && r.success);
|
|
1651
1657
|
if (docxResult) {
|
|
1652
|
-
const {
|
|
1653
|
-
|
|
1654
|
-
|
|
1655
|
-
|
|
1656
|
-
|
|
1657
|
-
|
|
1658
|
-
|
|
1658
|
+
const { prepareMarkdownWithMarkers, injectCommentsAtMarkers } = await import('../lib/wordcomments.js');
|
|
1659
|
+
const { runPandoc, loadConfig } = await import('../lib/build.js');
|
|
1660
|
+
|
|
1661
|
+
// Read the combined paper.md (with comments)
|
|
1662
|
+
let markdown = fs.readFileSync(paperPath, 'utf-8');
|
|
1663
|
+
|
|
1664
|
+
// If reference DOCX specified, realign comments from it
|
|
1665
|
+
if (options.reference) {
|
|
1666
|
+
const refPath = path.resolve(dir, options.reference);
|
|
1667
|
+
if (fs.existsSync(refPath)) {
|
|
1668
|
+
const spinRealign = fmt.spinner('Realigning comments from reference...').start();
|
|
1669
|
+
const { realignComments } = await import('../lib/comment-realign.js');
|
|
1670
|
+
// Realign in-memory (don't write to file)
|
|
1671
|
+
const { realignMarkdown } = await import('../lib/comment-realign.js');
|
|
1672
|
+
const realigned = await realignMarkdown(refPath, markdown);
|
|
1673
|
+
if (realigned.success) {
|
|
1674
|
+
markdown = realigned.markdown;
|
|
1675
|
+
spinRealign.stop();
|
|
1676
|
+
console.log(chalk.dim(` Realigned ${realigned.insertions} comments from reference`));
|
|
1677
|
+
} else {
|
|
1678
|
+
spinRealign.stop();
|
|
1679
|
+
console.log(chalk.yellow(` Warning: Could not realign comments: ${realigned.error}`));
|
|
1680
|
+
}
|
|
1681
|
+
} else {
|
|
1682
|
+
console.log(chalk.yellow(` Warning: Reference not found: ${options.reference}`));
|
|
1683
|
+
}
|
|
1684
|
+
}
|
|
1659
1685
|
|
|
1660
|
-
|
|
1661
|
-
const
|
|
1662
|
-
|
|
1686
|
+
// Step 1: Replace comments with markers
|
|
1687
|
+
const spinMarkers = fmt.spinner('Preparing markers...').start();
|
|
1688
|
+
const { markedMarkdown, comments } = prepareMarkdownWithMarkers(markdown);
|
|
1689
|
+
spinMarkers.stop();
|
|
1663
1690
|
|
|
1664
|
-
if (
|
|
1665
|
-
console.log(chalk.
|
|
1666
|
-
console.log(` Clean: ${path.basename(docxResult.outputPath)}`);
|
|
1667
|
-
console.log(` Comments: ${path.basename(commentsDocxPath)} (${commentResult.commentCount} comments)`);
|
|
1691
|
+
if (comments.length === 0) {
|
|
1692
|
+
console.log(chalk.yellow('\nNo comments found - skipping comments DOCX'));
|
|
1668
1693
|
} else {
|
|
1669
|
-
|
|
1694
|
+
// Step 2: Write marked markdown to temp file
|
|
1695
|
+
const markedPath = path.join(dir, '.paper-marked.md');
|
|
1696
|
+
fs.writeFileSync(markedPath, markedMarkdown, 'utf-8');
|
|
1697
|
+
|
|
1698
|
+
// Step 3: Build DOCX from marked markdown using pandoc
|
|
1699
|
+
const spinBuild = fmt.spinner('Building marked DOCX...').start();
|
|
1700
|
+
const markedDocxPath = path.join(dir, '.paper-marked.docx');
|
|
1701
|
+
const pandocResult = await runPandoc(markedPath, 'docx', config, { ...options, outputPath: markedDocxPath });
|
|
1702
|
+
spinBuild.stop();
|
|
1703
|
+
|
|
1704
|
+
if (!pandocResult.success) {
|
|
1705
|
+
console.error(chalk.yellow(`\nWarning: Could not build marked DOCX: ${pandocResult.error}`));
|
|
1706
|
+
} else {
|
|
1707
|
+
// Step 4: Replace markers with comment ranges
|
|
1708
|
+
const commentsDocxPath = docxResult.outputPath.replace(/\.docx$/, '_comments.docx');
|
|
1709
|
+
const spinInject = fmt.spinner('Injecting comments at markers...').start();
|
|
1710
|
+
const commentResult = await injectCommentsAtMarkers(markedDocxPath, comments, commentsDocxPath);
|
|
1711
|
+
spinInject.stop();
|
|
1712
|
+
|
|
1713
|
+
// Clean up temp files (keep for debugging if DEBUG env is set)
|
|
1714
|
+
if (!process.env.DEBUG) {
|
|
1715
|
+
try {
|
|
1716
|
+
fs.unlinkSync(markedPath);
|
|
1717
|
+
fs.unlinkSync(markedDocxPath);
|
|
1718
|
+
} catch { /* ignore */ }
|
|
1719
|
+
}
|
|
1720
|
+
|
|
1721
|
+
if (commentResult.success) {
|
|
1722
|
+
console.log(chalk.cyan('\nDual output:'));
|
|
1723
|
+
console.log(` Clean: ${path.basename(docxResult.outputPath)}`);
|
|
1724
|
+
console.log(` Comments: ${path.basename(commentsDocxPath)} (${commentResult.commentCount} comments)`);
|
|
1725
|
+
if (commentResult.skippedComments > 0) {
|
|
1726
|
+
console.log(chalk.yellow(` Warning: ${commentResult.skippedComments} comments could not be anchored (markers not found)`));
|
|
1727
|
+
}
|
|
1728
|
+
} else {
|
|
1729
|
+
console.error(chalk.yellow(`\nWarning: Could not create comments DOCX: ${commentResult.error}`));
|
|
1730
|
+
}
|
|
1731
|
+
}
|
|
1670
1732
|
}
|
|
1671
1733
|
} else {
|
|
1672
1734
|
console.error(chalk.yellow('\n--dual requires docx format to be built'));
|
package/lib/build.js
CHANGED
|
@@ -399,7 +399,8 @@ export async function runPandoc(inputPath, format, config, options = {}) {
|
|
|
399
399
|
: 'paper';
|
|
400
400
|
|
|
401
401
|
const ext = format === 'tex' ? '.tex' : format === 'pdf' ? '.pdf' : '.docx';
|
|
402
|
-
|
|
402
|
+
// Allow custom output path via options
|
|
403
|
+
const outputPath = options.outputPath || path.join(directory, `${baseName}${ext}`);
|
|
403
404
|
|
|
404
405
|
// Ensure crossref.yaml exists
|
|
405
406
|
ensureCrossrefConfig(directory, config);
|
|
@@ -0,0 +1,453 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Realign comments from a reference DOCX to markdown
|
|
3
|
+
* Uses paragraph-level matching with exact positions
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
import * as fs from 'fs';
|
|
7
|
+
import AdmZip from 'adm-zip';
|
|
8
|
+
import { parseStringPromise } from 'xml2js';
|
|
9
|
+
|
|
10
|
+
/**
 * Extract paragraphs with their full text and comment positions from a DOCX.
 *
 * @param {string} docxPath - Path to the .docx file to inspect.
 * @returns {Promise<Array<{text: string, comments: Array<{id: string, position: number, author: string, text: string}>}>>}
 *   One entry per non-empty (or commented) paragraph; each comment's
 *   `position` is the character offset of its commentRangeStart within the
 *   paragraph's concatenated run text.
 */
export async function extractParagraphsWithComments(docxPath) {
  const zip = new AdmZip(docxPath);
  const doc = zip.readAsText('word/document.xml');
  const commentsXml = zip.readAsText('word/comments.xml');

  // A DOCX with no comments has no word/comments.xml part; readAsText then
  // yields a falsy value and parseStringPromise would throw. Nothing to do.
  if (!commentsXml) return [];

  // Parse comments.xml to map comment id -> { author, text }
  const parsed = await parseStringPromise(commentsXml, { explicitArray: false });
  const commentNodes = parsed['w:comments']?.['w:comment'];
  if (!commentNodes) return [];

  const nodes = Array.isArray(commentNodes) ? commentNodes : [commentNodes];
  const commentData = {};

  for (const c of nodes) {
    const id = c.$['w:id'];
    const author = c.$['w:author'] || 'Unknown';
    let text = '';
    // Recursively collect every <w:t> text run nested under the comment node
    // (runs may sit directly on the comment or inside child paragraphs).
    const extractT = (n) => {
      if (!n) return;
      if (n['w:t']) {
        const t = n['w:t'];
        // xml2js yields either a bare string or an object with `_` for text
        text += typeof t === 'string' ? t : (t._ || t);
      }
      if (n['w:r']) {
        (Array.isArray(n['w:r']) ? n['w:r'] : [n['w:r']]).forEach(extractT);
      }
      if (n['w:p']) {
        (Array.isArray(n['w:p']) ? n['w:p'] : [n['w:p']]).forEach(extractT);
      }
    };
    extractT(c);
    commentData[id] = { author, text: text.trim() };
  }

  // Walk document.xml paragraph by paragraph, accumulating visible text and
  // recording the character offset at which each commentRangeStart appears.
  const paragraphs = [];
  const paraPattern = /<w:p\b[^>]*>([\s\S]*?)<\/w:p>/g;
  let match;

  while ((match = paraPattern.exec(doc)) !== null) {
    const paraContent = match[1];
    const hasComments = /commentRangeStart/.test(paraContent);

    let text = '';
    const comments = [];

    // Interleave text runs and comment range starts in document order so
    // `text.length` at a range start equals the comment's in-paragraph offset.
    const tokenPattern = /<w:t[^>]*>([^<]*)<\/w:t>|<w:commentRangeStart[^>]*w:id="(\d+)"[^>]*\/?>/g;
    let tokenMatch;

    while ((tokenMatch = tokenPattern.exec(paraContent)) !== null) {
      if (tokenMatch[1] !== undefined) {
        text += tokenMatch[1];
      } else if (tokenMatch[2] !== undefined) {
        const cid = tokenMatch[2];
        const data = commentData[cid];
        if (data) {
          comments.push({
            id: cid,
            position: text.length, // offset where the comment range begins
            author: data.author,
            text: data.text,
          });
        }
      }
    }

    // Keep commented-but-empty paragraphs so their comments are not lost.
    if (text.trim() || hasComments) {
      paragraphs.push({ text: text.trim(), comments });
    }
  }

  return paragraphs;
}
|
|
87
|
+
|
|
88
|
+
/**
 * Locate the markdown paragraph that best matches a reference paragraph.
 * Scores candidates by the fraction of the reference's significant words
 * (> 3 chars) they contain, with a containment bonus for short headers.
 * Returns null when the reference is too short or no candidate scores > 0.4.
 */
function findMatchingParagraph(refText, mdParagraphs) {
  const normalize = (s) => s.toLowerCase().replace(/\s+/g, ' ').trim();
  const refNorm = normalize(refText);

  // Very short paragraphs are too ambiguous to match reliably.
  if (refNorm.length < 20) return null;

  // Hoisted: these depend only on the reference text.
  const refWords = new Set(refNorm.split(' ').filter((w) => w.length > 3));
  const refPrefix = refNorm.slice(0, 50);

  let best = null;
  let bestScore = 0;

  mdParagraphs.forEach((candidate, index) => {
    const candNorm = normalize(candidate.text);

    // Word-overlap score: how many of the reference's words appear here.
    const candWords = candNorm.split(' ').filter((w) => w.length > 3);
    const overlap = candWords.filter((w) => refWords.has(w)).length;
    const score = overlap / Math.max(refWords.size, 1);

    // Substring containment catches headers that share few long words.
    const containsStart = candNorm.includes(refPrefix);

    if (score > bestScore || (containsStart && score > 0.3)) {
      bestScore = containsStart ? Math.max(score, 0.8) : score;
      best = { index, score: bestScore, paragraph: candidate };
    }
  });

  return bestScore > 0.4 ? best : null;
}
|
|
121
|
+
|
|
122
|
+
/**
 * Split markdown into paragraphs (runs of text separated by blank lines).
 *
 * @param {string} markdown - Full markdown document.
 * @returns {Array<{text: string, start: number, end: number}>} Trimmed
 *   paragraph text plus the [start, end) offsets of the untrimmed chunk
 *   within the original string.
 */
function parseMdParagraphs(markdown) {
  const paragraphs = [];
  const parts = markdown.split(/\n\n+/);

  let pos = 0;
  for (const part of parts) {
    const trimmed = part.trim();
    if (trimmed) {
      // Locate this chunk once (the previous version re-scanned with
      // indexOf three times per part for the same result).
      const start = markdown.indexOf(part, pos);
      const end = start + part.length;
      paragraphs.push({ text: trimmed, start, end });
      pos = end;
    }
  }

  return paragraphs;
}
|
|
144
|
+
|
|
145
|
+
/**
 * Strip inline comment marks ({>>Author: ...<<}) from a specific author.
 *
 * @param {string} text - Markdown text possibly containing comment marks.
 * @param {string} author - Author whose comments should be removed. Treated
 *   literally: regex metacharacters in the name (e.g. "A. B.", "A+B") are
 *   escaped before being interpolated into the pattern.
 * @returns {string} Text with that author's comment marks (and any
 *   whitespace immediately before them) removed.
 */
function stripAuthorComments(text, author) {
  // Escape the author name so it cannot be misread as a regex fragment
  // (same escaping idiom used by findMdPosition).
  const escaped = author.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
  const pattern = new RegExp(`\\s*\\{>>${escaped}:[^<]*<<\\}`, 'g');
  return text.replace(pattern, '');
}
|
|
152
|
+
|
|
153
|
+
/**
 * Normalize text for fuzzy matching: strips citation artifacts and figure
 * references, collapses whitespace, and lowercases the result.
 */
function normalizeForMatching(text) {
  // Ordered scrub passes; each removes one class of matching noise.
  const scrubbers = [
    // Word citation placeholders such as "( $$ )" and leftover "$" runs
    /\(\s*\$+\s*\)/g,
    /\$+/g,
    // Markdown citations: [@key] and bare @Author2021
    /\[@[^\]]+\]/g,
    /@[A-Z][a-z]+\d{4}/g,
    // Rendered citations like "(Author et al. 2021)" including lists
    /\([A-Z][a-z]+(?:\s+et\s+al\.?)?\s+\d{4}[a-z]?(?:[;,]\s*[A-Z][a-z]+(?:\s+et\s+al\.?)?\s+\d{4}[a-z]?)*\)/g,
    // Figure references like "Fig. 1" or "(Fig. 1a)"
    /\(?Fig\.?\s*\d+[a-z]?\)?/gi,
  ];

  let result = text;
  for (const pattern of scrubbers) {
    result = result.replace(pattern, '');
  }

  return result.replace(/\s+/g, ' ').trim().toLowerCase();
}
|
|
173
|
+
|
|
174
|
+
/**
 * Collect up to three words (> 2 chars) on each side of a character position.
 * Supplies the anchor context used to relocate a comment in different text.
 */
function getWordAtPosition(text, pos) {
  const windowSize = 30;
  const pickWords = (fragment) => fragment.split(/\s+/).filter((w) => w.length > 2);

  // Look at a 30-character window on each side of the position.
  const leading = pickWords(text.slice(Math.max(0, pos - windowSize), pos));
  const trailing = pickWords(text.slice(pos, pos + windowSize));

  return {
    before: leading.slice(-3),
    after: trailing.slice(0, 3),
  };
}
|
|
190
|
+
|
|
191
|
+
/**
 * Find position in markdown paragraph matching reference position.
 * Uses the anchor word (word immediately before the comment) for precise matching.
 *
 * @param {string} refText - Reference paragraph text (from the DOCX).
 * @param {number} refPos - Character offset of the comment within refText.
 * @param {string} mdText - Markdown paragraph text to place the comment in.
 * @returns {number} Character offset within mdText where the comment belongs.
 *   Always returns a number; falls back to a proportional position when no
 *   anchor word can be located.
 */
function findMdPosition(refText, refPos, mdText) {
  // Get the word(s) immediately before the comment position in reference
  const refWords = getWordAtPosition(refText, refPos);
  const normalizedMd = normalizeForMatching(mdText);

  // The "anchor word" is the last word before the comment
  const anchorWords = refWords.before;

  // No usable anchor (comment at paragraph start): place proportionally.
  if (anchorWords.length === 0) {
    const ratio = refPos / Math.max(refText.length, 1);
    return Math.round(ratio * mdText.length);
  }

  // Try to find the anchor word(s) in markdown
  // Start with the most specific (all words), fall back to fewer
  for (let numWords = anchorWords.length; numWords >= 1; numWords--) {
    const searchWords = anchorWords.slice(-numWords);
    // Escape each word so it is matched literally, joined by whitespace.
    const pattern = searchWords.map(w =>
      w.toLowerCase().replace(/[.*+?^${}()|[\]\\]/g, '\\$&')
    ).join('\\s+');

    const regex = new RegExp(pattern, 'g');
    const matches = [...normalizedMd.matchAll(regex)];

    if (matches.length === 1) {
      // Unique match - use this position
      const matchEnd = matches[0].index + matches[0][0].length;
      // Map back to original markdown position. NOTE: this is a proportional
      // mapping from normalized to raw text, so the result is approximate
      // when normalization removed citations/figures before the match.
      const ratio = matchEnd / Math.max(normalizedMd.length, 1);
      return Math.round(ratio * mdText.length);
    } else if (matches.length > 1) {
      // Multiple matches - use context after to disambiguate
      const afterWords = refWords.after;
      if (afterWords.length > 0) {
        const afterPattern = afterWords[0].toLowerCase().replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
        for (const match of matches) {
          const matchEnd = match.index + match[0].length;
          // Only look a short distance ahead; the following word should be near.
          const afterContext = normalizedMd.slice(matchEnd, matchEnd + 50);
          if (afterContext.includes(afterPattern)) {
            const ratio = matchEnd / Math.max(normalizedMd.length, 1);
            return Math.round(ratio * mdText.length);
          }
        }
      }
      // Fall back to first match
      const matchEnd = matches[0].index + matches[0][0].length;
      const ratio = matchEnd / Math.max(normalizedMd.length, 1);
      return Math.round(ratio * mdText.length);
    }
    // Zero matches: loop retries with a shorter (less specific) anchor.
  }

  // Fallback: proportional position
  const ratio = refPos / Math.max(refText.length, 1);
  return Math.round(ratio * mdText.length);
}
|
|
250
|
+
|
|
251
|
+
/**
 * Extract reply comments that immediately follow a parent comment.
 *
 * @param {string} markdown - Markdown containing {>>Author: ...<<} marks.
 * @param {string} parentAuthor - Author of the parent comments.
 * @param {string} replyAuthor - Author whose adjacent comments count as replies.
 * @returns {Map<string, string[]>} Map from the first 50 characters of each
 *   parent comment's text to the reply texts that followed it. Parents with
 *   no replies are omitted.
 */
function extractReplies(markdown, parentAuthor, replyAuthor) {
  // Escape author names so "A. B." or "A+B" are matched literally rather
  // than being interpreted as regex fragments.
  const esc = (s) => s.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');

  const replies = new Map();
  // Parent comment followed by zero or more adjacent reply comments.
  const pattern = new RegExp(
    `\\{>>${esc(parentAuthor)}:\\s*([^<]+)<<\\}((?:\\s*\\{>>${esc(replyAuthor)}:[^<]+<<\\})*)`,
    'g'
  );

  let match;
  while ((match = pattern.exec(markdown)) !== null) {
    const parentText = match[1].trim();
    const replyBlock = match[2];

    if (replyBlock) {
      const replyPattern = new RegExp(`\\{>>${esc(replyAuthor)}:\\s*([^<]+)<<\\}`, 'g');
      const replyTexts = [];
      let replyMatch;
      while ((replyMatch = replyPattern.exec(replyBlock)) !== null) {
        replyTexts.push(replyMatch[1].trim());
      }
      if (replyTexts.length > 0) {
        // Key must mirror the 50-char lookup key used when re-inserting.
        replies.set(parentText.slice(0, 50), replyTexts);
      }
    }
  }

  return replies;
}
|
|
282
|
+
|
|
283
|
+
/**
 * Realign comments from reference DOCX to markdown, writing the result back
 * to the markdown file. Existing inline comments are stripped, then each of
 * the reference author's comments is re-inserted at the best-matching
 * paragraph/position, carrying along any adjacent replies from replyAuthor.
 *
 * @param {string} docxPath - Reference DOCX with correctly positioned comments
 * @param {string} markdownPath - Markdown file to realign (overwritten unless dryRun)
 * @param {object} options - {dryRun: boolean, author: string, replyAuthor: string}
 * @returns {Promise<object>} {success, insertions, matched, unmatched} or,
 *   for a dry run, {success, dryRun: true, insertions}.
 */
export async function realignComments(docxPath, markdownPath, options = {}) {
  const { dryRun = false, author = 'Guy Colling', replyAuthor = 'Gilles Colling' } = options;

  // Read original markdown to extract replies before stripping
  const originalMarkdown = fs.readFileSync(markdownPath, 'utf-8');

  // Extract reply relationships (parent text -> reply texts) so replies
  // survive the strip-and-reinsert round trip below.
  const replies = extractReplies(originalMarkdown, author, replyAuthor);
  console.log(`Found ${replies.size} ${author} comments with ${replyAuthor} replies`);

  // Extract reference paragraphs, keeping only those with the target
  // author's comments.
  const refParagraphs = await extractParagraphsWithComments(docxPath);
  const refWithComments = refParagraphs.filter(
    (p) => p.comments.length > 0 && p.comments.some((c) => c.author === author)
  );

  console.log(`Found ${refWithComments.length} paragraphs with ${author} comments in reference`);

  // Strip ALL comments (both authors) from markdown to start fresh
  let markdown = originalMarkdown;
  markdown = markdown.replace(/\s*\{>>[^<]+<<\}/g, '');
  console.log(`Stripped all comments from markdown`);

  // Parse markdown paragraphs (gives text plus absolute start/end offsets)
  const mdParagraphs = parseMdParagraphs(markdown);

  // Track insertions (position, text) - will insert from end to start
  // so earlier positions stay valid as text grows.
  const insertions = [];
  let matched = 0;
  let unmatched = 0;

  for (const refPara of refWithComments) {
    const match = findMatchingParagraph(refPara.text, mdParagraphs);

    if (!match) {
      console.log(` No match for: "${refPara.text.slice(0, 60)}..."`);
      unmatched++;
      continue;
    }

    matched++;
    const mdPara = match.paragraph;

    // Get author's comments in this paragraph
    const authorComments = refPara.comments.filter((c) => c.author === author);

    for (const comment of authorComments) {
      // Find corresponding position in markdown paragraph, then convert to
      // an absolute offset in the whole document.
      const mdPos = findMdPosition(refPara.text, comment.position, mdPara.text);
      const absolutePos = mdPara.start + mdPos;

      // Build comment mark with any replies
      let commentMark = ` {>>${comment.author}: ${comment.text}<<}`;

      // Check for replies — keyed by the first 50 chars of the parent text,
      // matching the key built in extractReplies.
      const replyKey = comment.text.trim().slice(0, 50);
      const replyTexts = replies.get(replyKey);
      if (replyTexts) {
        for (const replyText of replyTexts) {
          commentMark += ` {>>${replyAuthor}: ${replyText}<<}`;
        }
      }

      insertions.push({
        position: absolutePos,
        text: commentMark,
        commentText: comment.text.slice(0, 30),
        hasReplies: !!replyTexts,
        // Context snippet around the insertion point, for dry-run output.
        debug: `"${mdPara.text.slice(Math.max(0, mdPos - 20), mdPos)}|HERE|${mdPara.text.slice(mdPos, mdPos + 20)}"`,
      });
    }
  }

  console.log(`Matched ${matched} paragraphs, ${unmatched} unmatched`);
  console.log(`Inserting ${insertions.length} comments (${insertions.filter((i) => i.hasReplies).length} with replies)`);

  if (dryRun) {
    // Preview the first 10 planned insertions without touching the file.
    console.log('\nDry run - would insert:');
    for (const ins of insertions.slice(0, 10)) {
      console.log(` At ${ins.position}: ${ins.debug}`);
      console.log(` Comment: "${ins.commentText}..."${ins.hasReplies ? ' (+ replies)' : ''}`);
    }
    return { success: true, dryRun: true, insertions: insertions.length };
  }

  // Sort by position descending and insert (end-to-start keeps offsets valid)
  insertions.sort((a, b) => b.position - a.position);

  for (const ins of insertions) {
    markdown = markdown.slice(0, ins.position) + ins.text + markdown.slice(ins.position);
  }

  // Write result
  fs.writeFileSync(markdownPath, markdown);

  return { success: true, insertions: insertions.length, matched, unmatched };
}
|
|
386
|
+
|
|
387
|
+
/**
 * Realign comments in markdown string (in-memory, doesn't write to file).
 * In-memory variant of realignComments: strips all inline comments, then
 * re-inserts the reference author's comments (with adjacent replies) at
 * positions derived from the reference DOCX.
 *
 * @param {string} docxPath - Reference DOCX with correctly positioned comments
 * @param {string} markdown - Markdown content to realign
 * @param {object} options - {author: string, replyAuthor: string}
 * @returns {Promise<{success: boolean, markdown: string, insertions: number}>}
 *   On failure, success is false, the original markdown is returned
 *   unchanged, and `error` carries the message.
 */
export async function realignMarkdown(docxPath, markdown, options = {}) {
  const { author = 'Guy Colling', replyAuthor = 'Gilles Colling' } = options;

  try {
    // Extract reply relationships from original markdown (before stripping)
    const replies = extractReplies(markdown, author, replyAuthor);

    // Extract reference paragraphs with the target author's comments
    const refParagraphs = await extractParagraphsWithComments(docxPath);
    const refWithComments = refParagraphs.filter(
      (p) => p.comments.length > 0 && p.comments.some((c) => c.author === author)
    );

    // Strip ALL comments from markdown
    let result = markdown.replace(/\s*\{>>[^<]+<<\}/g, '');

    // Parse markdown paragraphs (text plus absolute start/end offsets)
    const mdParagraphs = parseMdParagraphs(result);

    // Track insertions; applied end-to-start below so offsets stay valid
    const insertions = [];

    for (const refPara of refWithComments) {
      const match = findMatchingParagraph(refPara.text, mdParagraphs);
      if (!match) continue; // unmatched paragraphs are silently skipped here

      const mdPara = match.paragraph;
      const authorComments = refPara.comments.filter((c) => c.author === author);

      for (const comment of authorComments) {
        // In-paragraph position, converted to a whole-document offset.
        const mdPos = findMdPosition(refPara.text, comment.position, mdPara.text);
        const absolutePos = mdPara.start + mdPos;

        let commentMark = ` {>>${comment.author}: ${comment.text}<<}`;

        // Check for replies — key mirrors the one built in extractReplies.
        const replyKey = comment.text.trim().slice(0, 50);
        const replyTexts = replies.get(replyKey);
        if (replyTexts) {
          for (const replyText of replyTexts) {
            commentMark += ` {>>${replyAuthor}: ${replyText}<<}`;
          }
        }

        insertions.push({ position: absolutePos, text: commentMark });
      }
    }

    // Sort by position descending and insert
    insertions.sort((a, b) => b.position - a.position);

    for (const ins of insertions) {
      result = result.slice(0, ins.position) + ins.text + result.slice(ins.position);
    }

    return { success: true, markdown: result, insertions: insertions.length };
  } catch (err) {
    // Best-effort: callers treat failure as a warning and keep the original.
    return { success: false, markdown, insertions: 0, error: err.message };
  }
}
|