@deepcitation/deepcitation-js 1.1.27 → 1.1.29
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -21
- package/README.md +253 -253
- package/lib/chunk-2IZXUOQR.js +66 -0
- package/lib/chunk-3GR7VKUJ.cjs +1 -0
- package/lib/chunk-4FGOHQFP.cjs +66 -0
- package/lib/chunk-CFXDRAJL.cjs +1 -0
- package/lib/chunk-F2MMVEVC.cjs +1 -0
- package/lib/chunk-LSKISWWH.cjs +2 -0
- package/lib/chunk-O2XFH626.js +1 -0
- package/lib/chunk-QGXCOW3E.js +1 -0
- package/lib/chunk-RCHWBA6D.js +2 -0
- package/lib/chunk-RQPZSRID.js +1 -0
- package/lib/client/index.cjs +1 -0
- package/lib/client/{DeepCitation.d.ts → index.d.cts} +159 -3
- package/lib/client/index.d.ts +342 -2
- package/lib/client/index.js +1 -1
- package/lib/index.cjs +1 -0
- package/lib/index.d.cts +105 -0
- package/lib/index.d.ts +103 -21
- package/lib/index.js +1 -20
- package/lib/prompts/index.cjs +1 -0
- package/lib/prompts/index.d.cts +196 -0
- package/lib/prompts/index.d.ts +196 -3
- package/lib/prompts/index.js +1 -3
- package/lib/react/index.cjs +4 -0
- package/lib/react/{types.d.ts → index.d.cts} +173 -22
- package/lib/react/index.d.ts +461 -12
- package/lib/react/index.js +4 -20
- package/lib/types/index.cjs +1 -0
- package/lib/types/index.d.cts +96 -0
- package/lib/types/index.d.ts +96 -11
- package/lib/types/index.js +1 -7
- package/lib/utils-CSqRI6NU.d.cts +45 -0
- package/lib/{react/utils.d.ts → utils-D_wxy_ni.d.ts} +13 -12
- package/package.json +46 -11
- package/lib/client/DeepCitation.js +0 -374
- package/lib/client/types.d.ts +0 -154
- package/lib/client/types.js +0 -1
- package/lib/parsing/normalizeCitation.d.ts +0 -5
- package/lib/parsing/normalizeCitation.js +0 -198
- package/lib/parsing/parseCitation.d.ts +0 -79
- package/lib/parsing/parseCitation.js +0 -431
- package/lib/parsing/parseWorkAround.d.ts +0 -2
- package/lib/parsing/parseWorkAround.js +0 -73
- package/lib/prompts/citationPrompts.d.ts +0 -138
- package/lib/prompts/citationPrompts.js +0 -168
- package/lib/prompts/promptCompression.d.ts +0 -14
- package/lib/prompts/promptCompression.js +0 -127
- package/lib/prompts/types.d.ts +0 -4
- package/lib/prompts/types.js +0 -1
- package/lib/react/CitationComponent.d.ts +0 -106
- package/lib/react/CitationComponent.js +0 -419
- package/lib/react/CitationVariants.d.ts +0 -132
- package/lib/react/CitationVariants.js +0 -277
- package/lib/react/DiffDisplay.d.ts +0 -10
- package/lib/react/DiffDisplay.js +0 -33
- package/lib/react/Popover.d.ts +0 -15
- package/lib/react/Popover.js +0 -20
- package/lib/react/UrlCitationComponent.d.ts +0 -83
- package/lib/react/UrlCitationComponent.js +0 -224
- package/lib/react/VerificationTabs.d.ts +0 -10
- package/lib/react/VerificationTabs.js +0 -36
- package/lib/react/icons.d.ts +0 -22
- package/lib/react/icons.js +0 -16
- package/lib/react/primitives.d.ts +0 -99
- package/lib/react/primitives.js +0 -187
- package/lib/react/types.js +0 -1
- package/lib/react/useSmartDiff.d.ts +0 -16
- package/lib/react/useSmartDiff.js +0 -64
- package/lib/react/utils.js +0 -88
- package/lib/types/boxes.d.ts +0 -11
- package/lib/types/boxes.js +0 -1
- package/lib/types/citation.d.ts +0 -39
- package/lib/types/citation.js +0 -1
- package/lib/types/search.d.ts +0 -19
- package/lib/types/search.js +0 -1
- package/lib/types/verification.d.ts +0 -27
- package/lib/types/verification.js +0 -11
- package/lib/utils/diff.d.ts +0 -60
- package/lib/utils/diff.js +0 -414
- package/lib/utils/sha.d.ts +0 -10
- package/lib/utils/sha.js +0 -108
package/lib/utils/diff.js
DELETED
|
@@ -1,414 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* Custom diff implementation to replace the 'diff' npm package.
|
|
3
|
-
* This avoids dependency issues in Firebase Functions environments.
|
|
4
|
-
*
|
|
5
|
-
* Implements a Myers diff algorithm with optimizations inspired by jsdiff.
|
|
6
|
-
* @see https://github.com/kpdecker/jsdiff
|
|
7
|
-
*
|
|
8
|
-
* ---
|
|
9
|
-
*
|
|
10
|
-
* BSD 3-Clause License
|
|
11
|
-
*
|
|
12
|
-
* Copyright (c) 2009-2015, Kevin Decker <kpdecker@gmail.com>
|
|
13
|
-
* All rights reserved.
|
|
14
|
-
*
|
|
15
|
-
* Redistribution and use in source and binary forms, with or without
|
|
16
|
-
* modification, are permitted provided that the following conditions are met:
|
|
17
|
-
*
|
|
18
|
-
* 1. Redistributions of source code must retain the above copyright notice, this
|
|
19
|
-
* list of conditions and the following disclaimer.
|
|
20
|
-
*
|
|
21
|
-
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
|
22
|
-
* this list of conditions and the following disclaimer in the documentation
|
|
23
|
-
* and/or other materials provided with the distribution.
|
|
24
|
-
*
|
|
25
|
-
* 3. Neither the name of the copyright holder nor the names of its
|
|
26
|
-
* contributors may be used to endorse or promote products derived from
|
|
27
|
-
* this software without specific prior written permission.
|
|
28
|
-
*
|
|
29
|
-
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
|
30
|
-
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
31
|
-
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
|
32
|
-
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
|
33
|
-
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
34
|
-
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
|
35
|
-
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
|
36
|
-
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
|
37
|
-
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
38
|
-
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
39
|
-
*/
|
|
40
|
-
/**
 * Token-sequence diff built on the Myers algorithm, with common
 * prefix/suffix trimming so typical edits (e.g. appending text) only
 * run the O(d) search over the genuinely differing middle section.
 *
 * @see https://blog.jcoglan.com/2017/02/12/the-myers-diff-algorithm-part-1/
 * @param {string[]} oldTokens - tokens of the original text
 * @param {string[]} newTokens - tokens of the updated text
 * @param {(a: string, b: string) => boolean} [equals] - token comparator
 * @returns {Array<{value: string, added?: boolean, removed?: boolean, count: number}>}
 */
function computeDiff(oldTokens, newTokens, equals = (a, b) => a === b) {
    const lenOld = oldTokens.length;
    const lenNew = newTokens.length;
    // Trivial cases: nothing at all, pure insertion, pure deletion.
    if (lenOld === 0 && lenNew === 0) {
        return [];
    }
    if (lenOld === 0) {
        return [{ value: newTokens.join(""), added: true, count: lenNew }];
    }
    if (lenNew === 0) {
        return [{ value: oldTokens.join(""), removed: true, count: lenOld }];
    }
    // Length of the shared run at the start of both sequences.
    let head = 0;
    while (head < lenOld && head < lenNew && equals(oldTokens[head], newTokens[head])) {
        head += 1;
    }
    // Length of the shared run at the end (never overlapping the head).
    let tail = 0;
    while (tail < lenOld - head &&
        tail < lenNew - head &&
        equals(oldTokens[lenOld - 1 - tail], newTokens[lenNew - 1 - tail])) {
        tail += 1;
    }
    // Only the middle section actually differs.
    const oldCore = oldTokens.slice(head, lenOld - tail);
    const newCore = newTokens.slice(head, lenNew - tail);
    // Sequences identical: a single unchanged hunk covers everything.
    if (oldCore.length === 0 && newCore.length === 0) {
        return [{ value: oldTokens.join(""), count: lenOld }];
    }
    // Run the expensive search only on the differing cores.
    const core = myersDiff(oldCore, newCore, equals);
    // Stitch head + core diff + tail back together.
    const hunks = [];
    if (head > 0) {
        hunks.push({ value: oldTokens.slice(0, head).join(""), count: head });
    }
    hunks.push(...core);
    if (tail > 0) {
        hunks.push({ value: oldTokens.slice(lenOld - tail).join(""), count: tail });
    }
    return mergeConsecutiveChanges(hunks);
}
|
|
103
|
-
/**
 * Greedy forward pass of the Myers O((N+M)D) diff algorithm.
 *
 * NOTE(review): despite the original "middle snake" wording, this is the
 * plain forward variant — it snapshots the whole V array once per edit
 * distance d (O(d^2) extra memory) and recovers the actual edit path
 * afterwards via backtrack().
 *
 * @param {string[]} oldTokens - tokens of the original sequence
 * @param {string[]} newTokens - tokens of the updated sequence
 * @param {(a: string, b: string) => boolean} equals - token equality predicate
 * @returns {Array<{value: string, added?: boolean, removed?: boolean, count: number}>}
 */
function myersDiff(oldTokens, newTokens, equals) {
    const oldLen = oldTokens.length;
    const newLen = newTokens.length;
    // Worst-case edit distance: delete every old token, insert every new one.
    const maxD = oldLen + newLen;
    // V array indexed by k = x - y (diagonal).
    // We use an object to handle negative indices; seeded so the d=0
    // iteration starts from x=0 via v[k + 1].
    const v = { 1: 0 };
    // Snapshot of V for each d, consumed by backtrack().
    const trace = [];
    // Iterate through edit distances
    outer: for (let d = 0; d <= maxD; d++) {
        trace.push({ ...v });
        // Iterate through the diagonals reachable with exactly d edits.
        for (let k = -d; k <= d; k += 2) {
            // Decide whether to go down (insertion) or right (deletion).
            let x;
            if (k === -d || (k !== d && v[k - 1] < v[k + 1])) {
                x = v[k + 1]; // Move down (insert)
            }
            else {
                x = v[k - 1] + 1; // Move right (delete)
            }
            let y = x - k;
            // Follow the diagonal as far as tokens keep matching (free moves).
            while (x < oldLen && y < newLen && equals(oldTokens[x], newTokens[y])) {
                x++;
                y++;
            }
            v[k] = x;
            // Reached the bottom-right corner: shortest edit script found.
            if (x >= oldLen && y >= newLen) {
                break outer;
            }
        }
    }
    // Backtrack to build the diff
    return backtrack(trace, oldTokens, newTokens);
}
|
|
145
|
-
/**
 * Backtrack through the trace to build the diff result.
 *
 * Walks from (oldLen, newLen) back to (0, 0), replaying each edit
 * distance d in reverse: diagonal moves become unchanged one-token
 * hunks, vertical moves insertions, horizontal moves deletions.
 * Hunks are emitted with unshift so the result reads front-to-back;
 * mergeConsecutiveChanges() later coalesces the per-token entries.
 *
 * @param {Array<Object>} trace - V-array snapshots, one per edit distance d
 * @param {string[]} oldTokens
 * @param {string[]} newTokens
 * @returns {Array<{value: string, added?: boolean, removed?: boolean, count: number}>}
 */
function backtrack(trace, oldTokens, newTokens) {
    const changes = [];
    let x = oldTokens.length;
    let y = newTokens.length;
    for (let d = trace.length - 1; d >= 0; d--) {
        const v = trace[d];
        const k = x - y;
        // Same down-or-right tie-break as the forward pass, so prevK is
        // the diagonal this position was actually reached from.
        let prevK;
        if (k === -d || (k !== d && v[k - 1] < v[k + 1])) {
            prevK = k + 1;
        }
        else {
            prevK = k - 1;
        }
        // Missing boundary entries in the snapshot fall back to x=0.
        const prevX = v[prevK] ?? 0;
        const prevY = prevX - prevK;
        // Emit the diagonal run (matching tokens) walked after the edit.
        while (x > prevX && y > prevY) {
            x--;
            y--;
            changes.unshift({ value: oldTokens[x], count: 1 });
        }
        // d === 0 has no preceding edit; only the diagonal run above applies.
        if (d > 0) {
            if (x === prevX) {
                // Insertion (went down)
                y--;
                changes.unshift({ value: newTokens[y], added: true, count: 1 });
            }
            else {
                // Deletion (went right)
                x--;
                changes.unshift({ value: oldTokens[x], removed: true, count: 1 });
            }
        }
    }
    return changes;
}
|
|
185
|
-
/**
 * Collapse runs of adjacent change objects that carry the same
 * added/removed flags into one object, concatenating their values and
 * summing counts (a missing count is treated as 1). Input objects are
 * never mutated; the result holds fresh copies.
 *
 * @param {Array<{value: string, added?: boolean, removed?: boolean, count?: number}>} changes
 * @returns {Array<{value: string, added?: boolean, removed?: boolean, count: number}>}
 */
function mergeConsecutiveChanges(changes) {
    const merged = [];
    for (let i = 0; i < changes.length; i++) {
        const current = changes[i];
        const tail = merged.length > 0 ? merged[merged.length - 1] : undefined;
        // Strict comparison on both flags: `undefined` (unchanged) is a
        // distinct kind from explicit `true`.
        const sameKind = tail !== undefined &&
            tail.added === current.added &&
            tail.removed === current.removed;
        if (sameKind) {
            tail.value += current.value;
            tail.count = (tail.count || 1) + (current.count || 1);
        }
        else {
            merged.push({ ...current });
        }
    }
    return merged;
}
|
|
206
|
-
/**
 * Split text into lines, preserving line endings.
 * Handles both Unix (\n) and Windows (\r\n) line endings: each line
 * keeps its terminator (a \r stays attached to the \n that follows it),
 * and a final line without a newline is kept as-is.
 *
 * @param {string} text
 * @returns {string[]} lines including their terminators; [] for falsy input
 */
function splitLines(text) {
    if (!text) {
        return [];
    }
    const pieces = [];
    let start = 0;
    let nl = text.indexOf("\n");
    while (nl !== -1) {
        // Include the newline itself in the emitted line.
        pieces.push(text.slice(start, nl + 1));
        start = nl + 1;
        nl = text.indexOf("\n", start);
    }
    // Trailing content with no terminating newline.
    if (start < text.length) {
        pieces.push(text.slice(start));
    }
    return pieces;
}
|
|
229
|
-
/**
 * Extended word character class - matches jsdiff's extendedWordChars.
 * Includes: a-zA-Z0-9_, soft hyphen, Latin Extended-A/B, IPA Extensions,
 * Spacing Modifier Letters, and Latin Extended Additional.
 *
 * @see https://github.com/kpdecker/jsdiff/blob/master/src/diff/word.ts
 */
const EXTENDED_WORD_CHARS = "a-zA-Z0-9_\\u00AD\\u00C0-\\u00D6\\u00D8-\\u00F6\\u00F8-\\u02C6\\u02C8-\\u02D7\\u02DE-\\u02FF\\u1E00-\\u1EFF";
/**
 * Tokenization regex matching jsdiff's approach.
 * Matches: word character runs, whitespace runs, or single non-word chars.
 */
const TOKENIZE_REGEX = new RegExp(`[${EXTENDED_WORD_CHARS}]+|\\s+|[^${EXTENDED_WORD_CHARS}]`, "gu");
/**
 * Split text into tokens using jsdiff's tokenization approach.
 * Each token is one of:
 * - a word (run of extended word characters),
 * - a whitespace run, or
 * - a single punctuation/symbol character.
 *
 * @param {string} text
 * @returns {string[]} tokens in order; [] for falsy input
 */
function tokenizeWords(text) {
    if (!text) {
        return [];
    }
    // String.prototype.match with a /g regex returns every match, or
    // null when nothing matched at all.
    const tokens = text.match(TOKENIZE_REGEX);
    return tokens === null ? [] : tokens;
}
|
|
254
|
-
/**
 * Find the longest common prefix between two strings.
 *
 * @param {string} a
 * @param {string} b
 * @returns {string} the shared leading substring (possibly "")
 */
function longestCommonPrefix(a, b) {
    const limit = Math.min(a.length, b.length);
    let n = 0;
    while (n < limit && a[n] === b[n]) {
        n += 1;
    }
    return a.slice(0, n);
}
|
|
264
|
-
/**
 * Find the longest common suffix between two strings.
 *
 * @param {string} a
 * @param {string} b
 * @returns {string} the shared trailing substring (possibly "")
 */
function longestCommonSuffix(a, b) {
    const limit = Math.min(a.length, b.length);
    let n = 0;
    // Compare characters from the back of both strings.
    while (n < limit && a[a.length - 1 - n] === b[b.length - 1 - n]) {
        n += 1;
    }
    return a.slice(a.length - n);
}
|
|
276
|
-
/**
 * Check if a string is only whitespace.
 * The empty string counts as whitespace-only.
 *
 * @param {string} str
 * @returns {boolean} true when no non-whitespace character is present
 */
function isWhitespace(str) {
    return !/\S/.test(str);
}
|
|
282
|
-
/**
 * Deduplicate whitespace in change objects.
 * This is a simplified version of jsdiff's dedupeWhitespaceInChangeObjects.
 *
 * Handles three main scenarios:
 * 1. Deletion followed by insertion - extract common leading/trailing whitespace
 * 2. Lone insertion after unchanged - strip duplicate leading whitespace
 * 3. Lone deletion between unchanged - distribute whitespace properly
 *
 * @param {Array<{value: string, added?: boolean, removed?: boolean, count?: number}>} changes
 * @returns {Array<{value: string, added?: boolean, removed?: boolean, count: number}>}
 *   a cleaned change list with adjacent same-kind hunks merged
 */
function dedupeWhitespaceInChangeObjects(changes) {
    const result = [];
    for (let i = 0; i < changes.length; i++) {
        const change = changes[i];
        // Scenario 1: Deletion followed by insertion
        if (change.removed && changes[i + 1]?.added) {
            const deletion = change;
            const insertion = changes[i + 1];
            // Find common prefix (must be whitespace)
            const commonPrefix = longestCommonPrefix(deletion.value, insertion.value);
            const wsPrefix = commonPrefix.match(/^\s*/)?.[0] || "";
            // Find common suffix (must be whitespace); computed after the
            // shared prefix is removed so prefix and suffix never overlap.
            const delWithoutPrefix = deletion.value.slice(wsPrefix.length);
            const insWithoutPrefix = insertion.value.slice(wsPrefix.length);
            const commonSuffix = longestCommonSuffix(delWithoutPrefix, insWithoutPrefix);
            const wsSuffix = commonSuffix.match(/\s*$/)?.[0] || "";
            // Build the cleaned changes: shared whitespace becomes
            // unchanged hunks around the trimmed deletion/insertion.
            if (wsPrefix) {
                result.push({ value: wsPrefix, count: 1 });
            }
            const cleanedDel = deletion.value.slice(wsPrefix.length, deletion.value.length - wsSuffix.length);
            const cleanedIns = insertion.value.slice(wsPrefix.length, insertion.value.length - wsSuffix.length);
            if (cleanedDel) {
                result.push({ value: cleanedDel, removed: true, count: 1 });
            }
            if (cleanedIns) {
                result.push({ value: cleanedIns, added: true, count: 1 });
            }
            if (wsSuffix) {
                result.push({ value: wsSuffix, count: 1 });
            }
            i++; // Skip the insertion since we processed it
            continue;
        }
        // Scenario 2: Lone insertion after unchanged text
        if (change.added && i > 0 && !changes[i - 1].added && !changes[i - 1].removed) {
            // `prev` is read from `result` (not `changes`) so earlier
            // rewrites in this pass are taken into account.
            const prev = result[result.length - 1];
            if (prev && !prev.added && !prev.removed) {
                // Check for duplicate leading whitespace
                const leadingWs = change.value.match(/^\s*/)?.[0] || "";
                const trailingWs = prev.value.match(/\s*$/)?.[0] || "";
                if (leadingWs && trailingWs) {
                    const overlap = longestCommonSuffix(trailingWs, leadingWs);
                    if (overlap) {
                        // Remove overlap from the insertion
                        result.push({
                            value: change.value.slice(overlap.length),
                            added: true,
                            count: 1,
                        });
                        continue;
                    }
                }
            }
        }
        // Scenario 3: Lone deletion between unchanged text
        if (change.removed &&
            !changes[i + 1]?.added &&
            i > 0 &&
            !changes[i - 1]?.added &&
            !changes[i - 1]?.removed) {
            const prev = result[result.length - 1];
            const next = changes[i + 1];
            if (prev && next && !next.added && !next.removed) {
                const leadingWs = change.value.match(/^\s*/)?.[0] || "";
                const trailingWs = change.value.match(/\s*$/)?.[0] || "";
                const prevTrailingWs = prev.value.match(/\s*$/)?.[0] || "";
                const nextLeadingWs = next.value.match(/^\s*/)?.[0] || "";
                // If deletion starts/ends with whitespace that overlaps with neighbors
                if (leadingWs && prevTrailingWs) {
                    const overlap = longestCommonSuffix(prevTrailingWs, leadingWs);
                    if (overlap.length === leadingWs.length) {
                        // Leading whitespace is already in prev, strip it
                        result.push({
                            value: change.value.slice(leadingWs.length),
                            removed: true,
                            count: 1,
                        });
                        continue;
                    }
                }
                if (trailingWs && nextLeadingWs) {
                    const overlap = longestCommonPrefix(trailingWs, nextLeadingWs);
                    if (overlap.length === trailingWs.length) {
                        // Trailing whitespace will be in next, strip it.
                        // `|| change.value` guards the all-whitespace case:
                        // slice(0, -len) would yield "", so the original
                        // value is kept instead of emitting an empty hunk.
                        result.push({
                            value: change.value.slice(0, -trailingWs.length) || change.value,
                            removed: true,
                            count: 1,
                        });
                        continue;
                    }
                }
            }
        }
        // Default: just add the change as-is
        result.push({ ...change });
    }
    return mergeConsecutiveChanges(result);
}
|
|
391
|
-
/**
 * Compare two strings line by line.
 * Similar to Diff.diffLines from the 'diff' package.
 *
 * @param {string} oldStr - original text
 * @param {string} newStr - updated text
 * @returns {Array<{value: string, added?: boolean, removed?: boolean, count: number}>}
 */
export function diffLines(oldStr, newStr) {
    // Lines keep their terminators, so joining hunk values reproduces
    // the input text exactly.
    return computeDiff(splitLines(oldStr), splitLines(newStr));
}
|
|
400
|
-
/**
 * Compare two strings word by word, preserving whitespace.
 * Similar to Diff.diffWordsWithSpace from the 'diff' package.
 *
 * Features matching jsdiff:
 * - Extended Unicode word character support
 * - Proper tokenization (words, whitespace runs, single punctuation)
 * - Whitespace deduplication in consecutive changes
 *
 * @param {string} oldStr - original text
 * @param {string} newStr - updated text
 * @returns {Array<{value: string, added?: boolean, removed?: boolean, count: number}>}
 */
export function diffWordsWithSpace(oldStr, newStr) {
    const rawDiff = computeDiff(tokenizeWords(oldStr), tokenizeWords(newStr));
    // Post-pass: pull shared whitespace out of paired add/remove hunks.
    return dedupeWhitespaceInChangeObjects(rawDiff);
}
|
package/lib/utils/sha.d.ts
DELETED
|
@@ -1,10 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* Pure JavaScript SHA-1 implementation.
|
|
3
|
-
* Based on the FIPS 180-4 specification.
|
|
4
|
-
* No external dependencies.
|
|
5
|
-
*/
|
|
6
|
-
/**
|
|
7
|
-
* Computes a SHA-1 hash of the provided data.
|
|
8
|
-
* Used internally by generateCitationKey in react/utils.ts
|
|
9
|
-
*/
|
|
10
|
-
export declare function sha1Hash(data: string | any): string;
|
package/lib/utils/sha.js
DELETED
|
@@ -1,108 +0,0 @@
|
|
|
1
|
-
/**
 * Pure JavaScript SHA-1 implementation.
 * Based on the FIPS 180-4 specification.
 * No external dependencies.
 */
/**
 * Encode a JS string as UTF-8 bytes.
 *
 * @param {string} str
 * @returns {Uint8Array} the UTF-8 byte sequence
 */
function utf8Encode(str) {
    // TextEncoder always encodes as UTF-8.
    return new TextEncoder().encode(str);
}
|
|
10
|
-
/**
 * Compute the SHA-1 digest of a byte array per FIPS 180-4.
 *
 * @param {Uint8Array} message - raw input bytes (lengths >= 512 MB are
 *   not supported; the high word of the bit length is best-effort only)
 * @returns {string} 40-character lowercase hex digest
 */
function sha1(message) {
    // Initial hash values (FIPS 180-4 section 5.3.1)
    let h0 = 0x67452301;
    let h1 = 0xefcdab89;
    let h2 = 0x98badcfe;
    let h3 = 0x10325476;
    let h4 = 0xc3d2e1f0;
    // Pre-processing: adding padding bits
    const msgLen = message.length;
    const bitLen = msgLen * 8;
    // Calculate padded length: message + 1 (0x80) + padding + 8 (length)
    // Total must be multiple of 64 bytes (512 bits)
    const totalLen = msgLen + 1 + 8; // minimum: msg + 0x80 + 64-bit length
    const paddedLen = Math.ceil(totalLen / 64) * 64;
    // Create padded buffer (ArrayBuffer is zero-initialized, which
    // supplies the required zero padding for free)
    const padded = new ArrayBuffer(paddedLen);
    const paddedView = new Uint8Array(padded);
    const dataView = new DataView(padded);
    // Copy message
    paddedView.set(message);
    // Append bit '1' (0x80)
    paddedView[msgLen] = 0x80;
    // Append length as 64-bit big-endian (in bits)
    // High 32 bits (for messages > 512MB, which we don't support)
    dataView.setUint32(paddedLen - 8, Math.floor(bitLen / 0x100000000), false);
    // Low 32 bits
    dataView.setUint32(paddedLen - 4, bitLen >>> 0, false);
    // Process each 512-bit (64-byte) chunk; w is reused across chunks
    const w = new Uint32Array(80);
    for (let offset = 0; offset < paddedLen; offset += 64) {
        // Break chunk into sixteen 32-bit big-endian words
        for (let i = 0; i < 16; i++) {
            w[i] = dataView.getUint32(offset + i * 4, false);
        }
        // Extend the sixteen 32-bit words into eighty 32-bit words
        // (message schedule: XOR of four earlier words, rotated left by 1)
        for (let i = 16; i < 80; i++) {
            const val = w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16];
            w[i] = (val << 1) | (val >>> 31);
        }
        // Initialize working variables
        let a = h0;
        let b = h1;
        let c = h2;
        let d = h3;
        let e = h4;
        // Main loop: 80 rounds, round function and constant change every 20
        for (let i = 0; i < 80; i++) {
            let f;
            let k;
            if (i < 20) {
                f = (b & c) | (~b & d); // Ch
                k = 0x5a827999;
            }
            else if (i < 40) {
                f = b ^ c ^ d; // Parity
                k = 0x6ed9eba1;
            }
            else if (i < 60) {
                f = (b & c) | (b & d) | (c & d); // Maj
                k = 0x8f1bbcdc;
            }
            else {
                f = b ^ c ^ d; // Parity
                k = 0xca62c1d6;
            }
            // (a <<< 5) + f + e + k + w[i]; >>> 0 forces unsigned 32-bit wrap
            const temp = (((a << 5) | (a >>> 27)) + f + e + k + w[i]) >>> 0;
            e = d;
            d = c;
            c = ((b << 30) | (b >>> 2)) >>> 0; // b <<< 30
            b = a;
            a = temp;
        }
        // Add this chunk's hash to result (mod 2^32 via >>> 0)
        h0 = (h0 + a) >>> 0;
        h1 = (h1 + b) >>> 0;
        h2 = (h2 + c) >>> 0;
        h3 = (h3 + d) >>> 0;
        h4 = (h4 + e) >>> 0;
    }
    // Produce the final hash value (160-bit) as hex string
    const hex = (n) => n.toString(16).padStart(8, "0");
    return hex(h0) + hex(h1) + hex(h2) + hex(h3) + hex(h4);
}
|
|
93
|
-
/**
 * Computes a SHA-1 hash of the provided data.
 * Used internally by generateCitationKey in react/utils.ts
 *
 * Falsy input (including the empty string) returns "" without hashing;
 * serialization or hashing failures are logged and also yield "".
 *
 * @param {string | any} data - a string, or any JSON-serializable value
 * @returns {string} 40-character hex digest, or "" on falsy input/error
 */
export function sha1Hash(data) {
    if (!data) {
        return "";
    }
    try {
        const text = typeof data === "string" ? data : JSON.stringify(data);
        return sha1(utf8Encode(text));
    }
    catch (error) {
        // Best-effort: log and fall back to an empty key.
        console.error("Error in making the hash:", error);
        return "";
    }
}
|