bkui-vue 2.0.2-beta.45 → 2.0.2-beta.47
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs.js +47 -42
- package/dist/index.esm.js +7204 -7010
- package/dist/index.umd.js +50 -45
- package/lib/affix/index.js +12 -14
- package/lib/alert/index.js +12 -14
- package/lib/animate-number/index.js +11 -12
- package/lib/backtop/index.js +10 -11
- package/lib/badge/index.js +12 -14
- package/lib/breadcrumb/index.js +12 -14
- package/lib/button/index.js +12 -14
- package/lib/card/index.js +12 -14
- package/lib/cascader/index.js +94 -101
- package/lib/checkbox/index.js +37 -35
- package/lib/code-diff/index.js +1171 -675
- package/lib/collapse/index.js +44 -42
- package/lib/collapse-transition/index.js +3 -4
- package/lib/color-picker/index.js +59 -60
- package/lib/config-provider/index.js +12 -14
- package/lib/container/index.js +21 -25
- package/lib/date-picker/index.js +58 -51
- package/lib/dialog/index.js +53 -474
- package/lib/directives/index.js +37 -34
- package/lib/divider/index.js +2 -3
- package/lib/dropdown/index.js +17 -21
- package/lib/exception/index.js +14 -17
- package/lib/fixed-navbar/index.js +2 -3
- package/lib/form/index.js +37 -34
- package/lib/icon/angle-double-down-line.js +29 -36
- package/lib/icon/angle-double-left-line.js +29 -36
- package/lib/icon/angle-double-left.js +29 -36
- package/lib/icon/angle-double-right-line.js +29 -36
- package/lib/icon/angle-double-right.js +29 -36
- package/lib/icon/angle-double-up-line.js +29 -36
- package/lib/icon/angle-down-fill.js +29 -36
- package/lib/icon/angle-down-line.js +29 -36
- package/lib/icon/angle-down.js +29 -36
- package/lib/icon/angle-left.js +29 -36
- package/lib/icon/angle-right.js +29 -36
- package/lib/icon/angle-up-fill.js +29 -36
- package/lib/icon/angle-up.js +29 -36
- package/lib/icon/archive-fill.js +29 -36
- package/lib/icon/arrows-left.js +29 -36
- package/lib/icon/arrows-right.js +29 -36
- package/lib/icon/assistant.js +29 -36
- package/lib/icon/audio-fill.js +29 -36
- package/lib/icon/bk.js +29 -36
- package/lib/icon/circle.js +29 -36
- package/lib/icon/close-line.js +29 -36
- package/lib/icon/close.js +29 -36
- package/lib/icon/code.js +29 -36
- package/lib/icon/cog-shape.js +29 -36
- package/lib/icon/collapse-left.js +29 -36
- package/lib/icon/copy-shape.js +29 -36
- package/lib/icon/copy.js +29 -36
- package/lib/icon/data-shape.js +29 -36
- package/lib/icon/del.js +29 -36
- package/lib/icon/doc-fill.js +29 -36
- package/lib/icon/done.js +29 -36
- package/lib/icon/down-shape.js +29 -36
- package/lib/icon/down-small.js +29 -36
- package/lib/icon/edit-line.js +29 -36
- package/lib/icon/ellipsis.js +29 -36
- package/lib/icon/enlarge-line.js +29 -36
- package/lib/icon/error.js +29 -36
- package/lib/icon/excel-fill.js +29 -36
- package/lib/icon/exclamation-circle-shape.js +29 -36
- package/lib/icon/eye.js +29 -36
- package/lib/icon/filliscreen-line.js +29 -36
- package/lib/icon/fix-line.js +29 -36
- package/lib/icon/fix-shape.js +29 -36
- package/lib/icon/folder-open.js +29 -36
- package/lib/icon/folder-shape-open.js +29 -36
- package/lib/icon/folder-shape.js +29 -36
- package/lib/icon/folder.js +29 -36
- package/lib/icon/funnel.js +29 -36
- package/lib/icon/grag-fill.js +29 -36
- package/lib/icon/help-document-fill.js +29 -36
- package/lib/icon/help-fill.js +29 -36
- package/lib/icon/help.js +29 -36
- package/lib/icon/image-fill.js +29 -36
- package/lib/icon/img-error.js +29 -36
- package/lib/icon/img-placehoulder.js +29 -36
- package/lib/icon/info-line.js +29 -36
- package/lib/icon/info.js +29 -36
- package/lib/icon/left-shape.js +29 -36
- package/lib/icon/left-turn-line.js +29 -36
- package/lib/icon/loading.js +29 -36
- package/lib/icon/narrow-line.js +29 -36
- package/lib/icon/original.js +29 -36
- package/lib/icon/pdf-fill.js +29 -36
- package/lib/icon/play-shape.js +29 -36
- package/lib/icon/plus.js +29 -36
- package/lib/icon/ppt-fill.js +29 -36
- package/lib/icon/qq.js +29 -36
- package/lib/icon/right-shape.js +29 -36
- package/lib/icon/right-turn-line.js +29 -36
- package/lib/icon/search.js +29 -36
- package/lib/icon/share.js +29 -36
- package/lib/icon/spinner.js +29 -36
- package/lib/icon/success.js +29 -36
- package/lib/icon/switcher-loading.js +29 -36
- package/lib/icon/text-all.js +29 -36
- package/lib/icon/text-file.js +29 -36
- package/lib/icon/text-fill.js +29 -36
- package/lib/icon/transfer.js +29 -36
- package/lib/icon/tree-application-shape.js +29 -36
- package/lib/icon/unfull-screen.js +29 -36
- package/lib/icon/unvisible.js +29 -36
- package/lib/icon/up-shape.js +29 -36
- package/lib/icon/upload.js +29 -36
- package/lib/icon/video-fill.js +29 -36
- package/lib/icon/warn.js +29 -36
- package/lib/icon/weixin-pro.js +29 -36
- package/lib/icon/weixin.js +29 -36
- package/lib/image/index.js +49 -47
- package/lib/index.js +1 -1
- package/lib/info-box/index.js +62 -461
- package/lib/input/index.js +62 -60
- package/lib/link/index.js +2 -2
- package/lib/loading/index.js +22 -25
- package/lib/menu/index.js +36 -34
- package/lib/message/index.js +34 -31
- package/lib/modal/index.js +12 -14
- package/lib/navigation/index.js +7 -9
- package/lib/notify/index.js +12 -14
- package/lib/overflow-title/index.js +9 -10
- package/lib/pagination/index.js +32 -29
- package/lib/pop-confirm/index.js +4 -5
- package/lib/popover/index.js +209 -136
- package/lib/popover2/index.js +2 -2
- package/lib/process/index.js +59 -452
- package/lib/progress/index.js +16 -19
- package/lib/radio/index.js +37 -34
- package/lib/rate/index.js +12 -14
- package/lib/resize-layout/index.js +18 -20
- package/lib/scrollbar/index.js +51 -58
- package/lib/search-select/index.js +222 -595
- package/lib/select/index.js +111 -463
- package/lib/shared/index.js +83 -95
- package/lib/sideslider/index.js +43 -441
- package/lib/slider/index.js +49 -446
- package/lib/steps/index.js +60 -458
- package/lib/swiper/index.js +15 -17
- package/lib/switcher/index.js +12 -14
- package/lib/tab/index.d.ts +12 -0
- package/lib/tab/index.js +53 -45
- package/lib/tab/props.d.ts +3 -0
- package/lib/tab/tab.d.ts +6 -0
- package/lib/table/index.js +152 -501
- package/lib/tag/index.js +12 -14
- package/lib/tag-input/index.js +47 -41
- package/lib/timeline/index.js +1 -0
- package/lib/transfer/index.js +22 -17
- package/lib/tree/index.js +73 -66
- package/lib/upload/index.js +136 -487
- package/lib/virtual-render/index.js +47 -51
- package/package.json +1 -1
package/lib/code-diff/index.js
CHANGED
@@ -1004,9 +1004,10 @@ var x = y => { var x = {}; __webpack_require__.d(x, y); return x; }
|
|
1004
1004
|
var y = x => () => x
|
1005
1005
|
const shared_namespaceObject = x({ ["PropTypes"]: () => __WEBPACK_EXTERNAL_MODULE_bkui_vue_lib_shared_edbdfb03__.PropTypes, ["classes"]: () => __WEBPACK_EXTERNAL_MODULE_bkui_vue_lib_shared_edbdfb03__.classes, ["stringEnum"]: () => __WEBPACK_EXTERNAL_MODULE_bkui_vue_lib_shared_edbdfb03__.stringEnum, ["withInstall"]: () => __WEBPACK_EXTERNAL_MODULE_bkui_vue_lib_shared_edbdfb03__.withInstall });
|
1006
1006
|
;// CONCATENATED MODULE: ../../node_modules/@babel/runtime/helpers/esm/arrayWithHoles.js
|
1007
|
-
function _arrayWithHoles(
|
1008
|
-
if (Array.isArray(
|
1007
|
+
function _arrayWithHoles(r) {
|
1008
|
+
if (Array.isArray(r)) return r;
|
1009
1009
|
}
|
1010
|
+
|
1010
1011
|
;// CONCATENATED MODULE: ../../node_modules/@babel/runtime/helpers/esm/iterableToArrayLimit.js
|
1011
1012
|
function _iterableToArrayLimit(r, l) {
|
1012
1013
|
var t = null == r ? null : "undefined" != typeof Symbol && r[Symbol.iterator] || r["@@iterator"];
|
@@ -1035,34 +1036,38 @@ function _iterableToArrayLimit(r, l) {
|
|
1035
1036
|
return a;
|
1036
1037
|
}
|
1037
1038
|
}
|
1039
|
+
|
1038
1040
|
;// CONCATENATED MODULE: ../../node_modules/@babel/runtime/helpers/esm/arrayLikeToArray.js
|
1039
|
-
function _arrayLikeToArray(
|
1040
|
-
|
1041
|
-
for (var
|
1042
|
-
return
|
1041
|
+
function _arrayLikeToArray(r, a) {
|
1042
|
+
(null == a || a > r.length) && (a = r.length);
|
1043
|
+
for (var e = 0, n = Array(a); e < a; e++) n[e] = r[e];
|
1044
|
+
return n;
|
1043
1045
|
}
|
1046
|
+
|
1044
1047
|
;// CONCATENATED MODULE: ../../node_modules/@babel/runtime/helpers/esm/unsupportedIterableToArray.js
|
1045
1048
|
|
1046
|
-
function _unsupportedIterableToArray(
|
1047
|
-
if (
|
1048
|
-
|
1049
|
-
|
1050
|
-
|
1051
|
-
|
1052
|
-
if (n === "Arguments" || /^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)) return _arrayLikeToArray(o, minLen);
|
1049
|
+
function _unsupportedIterableToArray(r, a) {
|
1050
|
+
if (r) {
|
1051
|
+
if ("string" == typeof r) return _arrayLikeToArray(r, a);
|
1052
|
+
var t = {}.toString.call(r).slice(8, -1);
|
1053
|
+
return "Object" === t && r.constructor && (t = r.constructor.name), "Map" === t || "Set" === t ? Array.from(r) : "Arguments" === t || /^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(t) ? _arrayLikeToArray(r, a) : void 0;
|
1054
|
+
}
|
1053
1055
|
}
|
1056
|
+
|
1054
1057
|
;// CONCATENATED MODULE: ../../node_modules/@babel/runtime/helpers/esm/nonIterableRest.js
|
1055
1058
|
function _nonIterableRest() {
|
1056
1059
|
throw new TypeError("Invalid attempt to destructure non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.");
|
1057
1060
|
}
|
1061
|
+
|
1058
1062
|
;// CONCATENATED MODULE: ../../node_modules/@babel/runtime/helpers/esm/slicedToArray.js
|
1059
1063
|
|
1060
1064
|
|
1061
1065
|
|
1062
1066
|
|
1063
|
-
function _slicedToArray(
|
1064
|
-
return _arrayWithHoles(
|
1067
|
+
function _slicedToArray(r, e) {
|
1068
|
+
return _arrayWithHoles(r) || _iterableToArrayLimit(r, e) || _unsupportedIterableToArray(r, e) || _nonIterableRest();
|
1065
1069
|
}
|
1070
|
+
|
1066
1071
|
;// CONCATENATED MODULE: external "vue"
|
1067
1072
|
var external_vue_x = y => { var x = {}; __webpack_require__.d(x, y); return x; }
|
1068
1073
|
var external_vue_y = x => () => x
|
@@ -2693,6 +2698,14 @@ function hashCode(text) {
|
|
2693
2698
|
}
|
2694
2699
|
return hash;
|
2695
2700
|
}
|
2701
|
+
function max(arr) {
|
2702
|
+
const length = arr.length;
|
2703
|
+
let max = -Infinity;
|
2704
|
+
for (let i = 0; i < length; i++) {
|
2705
|
+
max = Math.max(max, arr[i]);
|
2706
|
+
}
|
2707
|
+
return max;
|
2708
|
+
}
|
2696
2709
|
//# sourceMappingURL=utils.js.map
|
2697
2710
|
;// CONCATENATED MODULE: ../../node_modules/diff2html/lib-esm/diff-parser.js
|
2698
2711
|
|
@@ -3009,7 +3022,8 @@ function parse(diffInput, config = {}) {
|
|
3009
3022
|
else if ((values = index.exec(line))) {
|
3010
3023
|
currentFile.checksumBefore = values[1];
|
3011
3024
|
currentFile.checksumAfter = values[2];
|
3012
|
-
|
3025
|
+
if (values[3])
|
3026
|
+
currentFile.mode = values[3];
|
3013
3027
|
}
|
3014
3028
|
else if ((values = combinedIndex.exec(line))) {
|
3015
3029
|
currentFile.checksumBefore = [values[2], values[3]];
|
@@ -3037,211 +3051,245 @@ function parse(diffInput, config = {}) {
|
|
3037
3051
|
function lib_Diff() {}
|
3038
3052
|
lib_Diff.prototype = {
|
3039
3053
|
diff: function diff(oldString, newString) {
|
3054
|
+
var _options$timeout;
|
3040
3055
|
var options = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {};
|
3041
3056
|
var callback = options.callback;
|
3042
|
-
|
3043
3057
|
if (typeof options === 'function') {
|
3044
3058
|
callback = options;
|
3045
3059
|
options = {};
|
3046
3060
|
}
|
3047
|
-
|
3048
|
-
this.options = options;
|
3049
3061
|
var self = this;
|
3050
|
-
|
3051
3062
|
function done(value) {
|
3063
|
+
value = self.postProcess(value, options);
|
3052
3064
|
if (callback) {
|
3053
3065
|
setTimeout(function () {
|
3054
|
-
callback(
|
3066
|
+
callback(value);
|
3055
3067
|
}, 0);
|
3056
3068
|
return true;
|
3057
3069
|
} else {
|
3058
3070
|
return value;
|
3059
3071
|
}
|
3060
|
-
}
|
3061
|
-
|
3072
|
+
}
|
3062
3073
|
|
3063
|
-
|
3064
|
-
|
3065
|
-
|
3066
|
-
|
3074
|
+
// Allow subclasses to massage the input prior to running
|
3075
|
+
oldString = this.castInput(oldString, options);
|
3076
|
+
newString = this.castInput(newString, options);
|
3077
|
+
oldString = this.removeEmpty(this.tokenize(oldString, options));
|
3078
|
+
newString = this.removeEmpty(this.tokenize(newString, options));
|
3067
3079
|
var newLen = newString.length,
|
3068
|
-
|
3080
|
+
oldLen = oldString.length;
|
3069
3081
|
var editLength = 1;
|
3070
3082
|
var maxEditLength = newLen + oldLen;
|
3071
|
-
|
3072
|
-
if (options.maxEditLength) {
|
3083
|
+
if (options.maxEditLength != null) {
|
3073
3084
|
maxEditLength = Math.min(maxEditLength, options.maxEditLength);
|
3074
3085
|
}
|
3075
|
-
|
3086
|
+
var maxExecutionTime = (_options$timeout = options.timeout) !== null && _options$timeout !== void 0 ? _options$timeout : Infinity;
|
3087
|
+
var abortAfterTimestamp = Date.now() + maxExecutionTime;
|
3076
3088
|
var bestPath = [{
|
3077
|
-
|
3078
|
-
|
3079
|
-
}];
|
3080
|
-
|
3081
|
-
var oldPos = this.extractCommon(bestPath[0], newString, oldString, 0);
|
3089
|
+
oldPos: -1,
|
3090
|
+
lastComponent: undefined
|
3091
|
+
}];
|
3082
3092
|
|
3083
|
-
|
3093
|
+
// Seed editLength = 0, i.e. the content starts with the same values
|
3094
|
+
var newPos = this.extractCommon(bestPath[0], newString, oldString, 0, options);
|
3095
|
+
if (bestPath[0].oldPos + 1 >= oldLen && newPos + 1 >= newLen) {
|
3084
3096
|
// Identity per the equality and tokenizer
|
3085
|
-
return done([
|
3086
|
-
|
3087
|
-
|
3088
|
-
|
3089
|
-
|
3090
|
-
|
3091
|
-
|
3097
|
+
return done(lib_buildValues(self, bestPath[0].lastComponent, newString, oldString, self.useLongestToken));
|
3098
|
+
}
|
3099
|
+
|
3100
|
+
// Once we hit the right edge of the edit graph on some diagonal k, we can
|
3101
|
+
// definitely reach the end of the edit graph in no more than k edits, so
|
3102
|
+
// there's no point in considering any moves to diagonal k+1 any more (from
|
3103
|
+
// which we're guaranteed to need at least k+1 more edits).
|
3104
|
+
// Similarly, once we've reached the bottom of the edit graph, there's no
|
3105
|
+
// point considering moves to lower diagonals.
|
3106
|
+
// We record this fact by setting minDiagonalToConsider and
|
3107
|
+
// maxDiagonalToConsider to some finite value once we've hit the edge of
|
3108
|
+
// the edit graph.
|
3109
|
+
// This optimization is not faithful to the original algorithm presented in
|
3110
|
+
// Myers's paper, which instead pointlessly extends D-paths off the end of
|
3111
|
+
// the edit graph - see page 7 of Myers's paper which notes this point
|
3112
|
+
// explicitly and illustrates it with a diagram. This has major performance
|
3113
|
+
// implications for some common scenarios. For instance, to compute a diff
|
3114
|
+
// where the new text simply appends d characters on the end of the
|
3115
|
+
// original text of length n, the true Myers algorithm will take O(n+d^2)
|
3116
|
+
// time while this optimization needs only O(n+d) time.
|
3117
|
+
var minDiagonalToConsider = -Infinity,
|
3118
|
+
maxDiagonalToConsider = Infinity;
|
3119
|
+
|
3120
|
+
// Main worker method. checks all permutations of a given edit length for acceptance.
|
3092
3121
|
function execEditLength() {
|
3093
|
-
for (var diagonalPath = -
|
3122
|
+
for (var diagonalPath = Math.max(minDiagonalToConsider, -editLength); diagonalPath <= Math.min(maxDiagonalToConsider, editLength); diagonalPath += 2) {
|
3094
3123
|
var basePath = void 0;
|
3095
|
-
|
3096
|
-
|
3097
|
-
|
3098
|
-
_oldPos = (removePath ? removePath.newPos : 0) - diagonalPath;
|
3099
|
-
|
3100
|
-
if (addPath) {
|
3124
|
+
var removePath = bestPath[diagonalPath - 1],
|
3125
|
+
addPath = bestPath[diagonalPath + 1];
|
3126
|
+
if (removePath) {
|
3101
3127
|
// No one else is going to attempt to use this value, clear it
|
3102
3128
|
bestPath[diagonalPath - 1] = undefined;
|
3103
3129
|
}
|
3104
|
-
|
3105
|
-
|
3106
|
-
|
3107
|
-
|
3130
|
+
var canAdd = false;
|
3131
|
+
if (addPath) {
|
3132
|
+
// what newPos will be after we do an insertion:
|
3133
|
+
var addPathNewPos = addPath.oldPos - diagonalPath;
|
3134
|
+
canAdd = addPath && 0 <= addPathNewPos && addPathNewPos < newLen;
|
3135
|
+
}
|
3136
|
+
var canRemove = removePath && removePath.oldPos + 1 < oldLen;
|
3108
3137
|
if (!canAdd && !canRemove) {
|
3109
3138
|
// If this path is a terminal then prune
|
3110
3139
|
bestPath[diagonalPath] = undefined;
|
3111
3140
|
continue;
|
3112
|
-
}
|
3113
|
-
// path whose position in the new string is the farthest from the origin
|
3114
|
-
// and does not pass the bounds of the diff graph
|
3115
|
-
|
3141
|
+
}
|
3116
3142
|
|
3117
|
-
|
3118
|
-
|
3119
|
-
|
3143
|
+
// Select the diagonal that we want to branch from. We select the prior
|
3144
|
+
// path whose position in the old string is the farthest from the origin
|
3145
|
+
// and does not pass the bounds of the diff graph
|
3146
|
+
if (!canRemove || canAdd && removePath.oldPos < addPath.oldPos) {
|
3147
|
+
basePath = self.addToPath(addPath, true, false, 0, options);
|
3120
3148
|
} else {
|
3121
|
-
basePath =
|
3122
|
-
|
3123
|
-
basePath.newPos++;
|
3124
|
-
self.pushComponent(basePath.components, true, undefined);
|
3149
|
+
basePath = self.addToPath(removePath, false, true, 1, options);
|
3125
3150
|
}
|
3126
|
-
|
3127
|
-
|
3128
|
-
|
3129
|
-
|
3130
|
-
return done(lib_buildValues(self, basePath.components, newString, oldString, self.useLongestToken));
|
3151
|
+
newPos = self.extractCommon(basePath, newString, oldString, diagonalPath, options);
|
3152
|
+
if (basePath.oldPos + 1 >= oldLen && newPos + 1 >= newLen) {
|
3153
|
+
// If we have hit the end of both strings, then we are done
|
3154
|
+
return done(lib_buildValues(self, basePath.lastComponent, newString, oldString, self.useLongestToken));
|
3131
3155
|
} else {
|
3132
|
-
// Otherwise track this path as a potential candidate and continue.
|
3133
3156
|
bestPath[diagonalPath] = basePath;
|
3157
|
+
if (basePath.oldPos + 1 >= oldLen) {
|
3158
|
+
maxDiagonalToConsider = Math.min(maxDiagonalToConsider, diagonalPath - 1);
|
3159
|
+
}
|
3160
|
+
if (newPos + 1 >= newLen) {
|
3161
|
+
minDiagonalToConsider = Math.max(minDiagonalToConsider, diagonalPath + 1);
|
3162
|
+
}
|
3134
3163
|
}
|
3135
3164
|
}
|
3136
|
-
|
3137
3165
|
editLength++;
|
3138
|
-
}
|
3166
|
+
}
|
3167
|
+
|
3168
|
+
// Performs the length of edit iteration. Is a bit fugly as this has to support the
|
3139
3169
|
// sync and async mode which is never fun. Loops over execEditLength until a value
|
3140
3170
|
// is produced, or until the edit length exceeds options.maxEditLength (if given),
|
3141
3171
|
// in which case it will return undefined.
|
3142
|
-
|
3143
|
-
|
3144
3172
|
if (callback) {
|
3145
3173
|
(function exec() {
|
3146
3174
|
setTimeout(function () {
|
3147
|
-
if (editLength > maxEditLength) {
|
3175
|
+
if (editLength > maxEditLength || Date.now() > abortAfterTimestamp) {
|
3148
3176
|
return callback();
|
3149
3177
|
}
|
3150
|
-
|
3151
3178
|
if (!execEditLength()) {
|
3152
3179
|
exec();
|
3153
3180
|
}
|
3154
3181
|
}, 0);
|
3155
3182
|
})();
|
3156
3183
|
} else {
|
3157
|
-
while (editLength <= maxEditLength) {
|
3184
|
+
while (editLength <= maxEditLength && Date.now() <= abortAfterTimestamp) {
|
3158
3185
|
var ret = execEditLength();
|
3159
|
-
|
3160
3186
|
if (ret) {
|
3161
3187
|
return ret;
|
3162
3188
|
}
|
3163
3189
|
}
|
3164
3190
|
}
|
3165
3191
|
},
|
3166
|
-
|
3167
|
-
var last =
|
3168
|
-
|
3169
|
-
|
3170
|
-
|
3171
|
-
|
3172
|
-
|
3173
|
-
|
3174
|
-
|
3175
|
-
|
3192
|
+
addToPath: function addToPath(path, added, removed, oldPosInc, options) {
|
3193
|
+
var last = path.lastComponent;
|
3194
|
+
if (last && !options.oneChangePerToken && last.added === added && last.removed === removed) {
|
3195
|
+
return {
|
3196
|
+
oldPos: path.oldPos + oldPosInc,
|
3197
|
+
lastComponent: {
|
3198
|
+
count: last.count + 1,
|
3199
|
+
added: added,
|
3200
|
+
removed: removed,
|
3201
|
+
previousComponent: last.previousComponent
|
3202
|
+
}
|
3176
3203
|
};
|
3177
3204
|
} else {
|
3178
|
-
|
3179
|
-
|
3180
|
-
|
3181
|
-
|
3182
|
-
|
3205
|
+
return {
|
3206
|
+
oldPos: path.oldPos + oldPosInc,
|
3207
|
+
lastComponent: {
|
3208
|
+
count: 1,
|
3209
|
+
added: added,
|
3210
|
+
removed: removed,
|
3211
|
+
previousComponent: last
|
3212
|
+
}
|
3213
|
+
};
|
3183
3214
|
}
|
3184
3215
|
},
|
3185
|
-
extractCommon: function extractCommon(basePath, newString, oldString, diagonalPath) {
|
3216
|
+
extractCommon: function extractCommon(basePath, newString, oldString, diagonalPath, options) {
|
3186
3217
|
var newLen = newString.length,
|
3187
|
-
|
3188
|
-
|
3189
|
-
|
3190
|
-
|
3191
|
-
|
3192
|
-
while (newPos + 1 < newLen && oldPos + 1 < oldLen && this.equals(newString[newPos + 1], oldString[oldPos + 1])) {
|
3218
|
+
oldLen = oldString.length,
|
3219
|
+
oldPos = basePath.oldPos,
|
3220
|
+
newPos = oldPos - diagonalPath,
|
3221
|
+
commonCount = 0;
|
3222
|
+
while (newPos + 1 < newLen && oldPos + 1 < oldLen && this.equals(oldString[oldPos + 1], newString[newPos + 1], options)) {
|
3193
3223
|
newPos++;
|
3194
3224
|
oldPos++;
|
3195
3225
|
commonCount++;
|
3226
|
+
if (options.oneChangePerToken) {
|
3227
|
+
basePath.lastComponent = {
|
3228
|
+
count: 1,
|
3229
|
+
previousComponent: basePath.lastComponent,
|
3230
|
+
added: false,
|
3231
|
+
removed: false
|
3232
|
+
};
|
3233
|
+
}
|
3196
3234
|
}
|
3197
|
-
|
3198
|
-
|
3199
|
-
|
3200
|
-
|
3201
|
-
|
3235
|
+
if (commonCount && !options.oneChangePerToken) {
|
3236
|
+
basePath.lastComponent = {
|
3237
|
+
count: commonCount,
|
3238
|
+
previousComponent: basePath.lastComponent,
|
3239
|
+
added: false,
|
3240
|
+
removed: false
|
3241
|
+
};
|
3202
3242
|
}
|
3203
|
-
|
3204
|
-
|
3205
|
-
return oldPos;
|
3243
|
+
basePath.oldPos = oldPos;
|
3244
|
+
return newPos;
|
3206
3245
|
},
|
3207
|
-
equals: function equals(left, right) {
|
3208
|
-
if (
|
3209
|
-
return
|
3246
|
+
equals: function equals(left, right, options) {
|
3247
|
+
if (options.comparator) {
|
3248
|
+
return options.comparator(left, right);
|
3210
3249
|
} else {
|
3211
|
-
return left === right ||
|
3250
|
+
return left === right || options.ignoreCase && left.toLowerCase() === right.toLowerCase();
|
3212
3251
|
}
|
3213
3252
|
},
|
3214
3253
|
removeEmpty: function removeEmpty(array) {
|
3215
3254
|
var ret = [];
|
3216
|
-
|
3217
3255
|
for (var i = 0; i < array.length; i++) {
|
3218
3256
|
if (array[i]) {
|
3219
3257
|
ret.push(array[i]);
|
3220
3258
|
}
|
3221
3259
|
}
|
3222
|
-
|
3223
3260
|
return ret;
|
3224
3261
|
},
|
3225
3262
|
castInput: function castInput(value) {
|
3226
3263
|
return value;
|
3227
3264
|
},
|
3228
3265
|
tokenize: function tokenize(value) {
|
3229
|
-
return
|
3266
|
+
return Array.from(value);
|
3230
3267
|
},
|
3231
3268
|
join: function join(chars) {
|
3232
3269
|
return chars.join('');
|
3270
|
+
},
|
3271
|
+
postProcess: function postProcess(changeObjects) {
|
3272
|
+
return changeObjects;
|
3233
3273
|
}
|
3234
3274
|
};
|
3235
|
-
|
3236
|
-
|
3275
|
+
function lib_buildValues(diff, lastComponent, newString, oldString, useLongestToken) {
|
3276
|
+
// First we convert our linked list of components in reverse order to an
|
3277
|
+
// array in the right order:
|
3278
|
+
var components = [];
|
3279
|
+
var nextComponent;
|
3280
|
+
while (lastComponent) {
|
3281
|
+
components.push(lastComponent);
|
3282
|
+
nextComponent = lastComponent.previousComponent;
|
3283
|
+
delete lastComponent.previousComponent;
|
3284
|
+
lastComponent = nextComponent;
|
3285
|
+
}
|
3286
|
+
components.reverse();
|
3237
3287
|
var componentPos = 0,
|
3238
|
-
|
3239
|
-
|
3240
|
-
|
3241
|
-
|
3288
|
+
componentLen = components.length,
|
3289
|
+
newPos = 0,
|
3290
|
+
oldPos = 0;
|
3242
3291
|
for (; componentPos < componentLen; componentPos++) {
|
3243
3292
|
var component = components[componentPos];
|
3244
|
-
|
3245
3293
|
if (!component.removed) {
|
3246
3294
|
if (!component.added && useLongestToken) {
|
3247
3295
|
var value = newString.slice(newPos, newPos + component.count);
|
@@ -3253,66 +3301,133 @@ function lib_buildValues(diff, components, newString, oldString, useLongestToken
|
|
3253
3301
|
} else {
|
3254
3302
|
component.value = diff.join(newString.slice(newPos, newPos + component.count));
|
3255
3303
|
}
|
3304
|
+
newPos += component.count;
|
3256
3305
|
|
3257
|
-
|
3258
|
-
|
3306
|
+
// Common case
|
3259
3307
|
if (!component.added) {
|
3260
3308
|
oldPos += component.count;
|
3261
3309
|
}
|
3262
3310
|
} else {
|
3263
3311
|
component.value = diff.join(oldString.slice(oldPos, oldPos + component.count));
|
3264
|
-
oldPos += component.count;
|
3265
|
-
// The diffing algorithm is tied to add then remove output and this is the simplest
|
3266
|
-
// route to get the desired output with minimal overhead.
|
3267
|
-
|
3268
|
-
if (componentPos && components[componentPos - 1].added) {
|
3269
|
-
var tmp = components[componentPos - 1];
|
3270
|
-
components[componentPos - 1] = components[componentPos];
|
3271
|
-
components[componentPos] = tmp;
|
3272
|
-
}
|
3312
|
+
oldPos += component.count;
|
3273
3313
|
}
|
3274
|
-
} // Special case handle for when one terminal is ignored (i.e. whitespace).
|
3275
|
-
// For this case we merge the terminal into the prior string and drop the change.
|
3276
|
-
// This is only available for string mode.
|
3277
|
-
|
3278
|
-
|
3279
|
-
var lastComponent = components[componentLen - 1];
|
3280
|
-
|
3281
|
-
if (componentLen > 1 && typeof lastComponent.value === 'string' && (lastComponent.added || lastComponent.removed) && diff.equals('', lastComponent.value)) {
|
3282
|
-
components[componentLen - 2].value += lastComponent.value;
|
3283
|
-
components.pop();
|
3284
3314
|
}
|
3285
|
-
|
3286
3315
|
return components;
|
3287
3316
|
}
|
3288
3317
|
|
3289
|
-
function lib_clonePath(path) {
|
3290
|
-
return {
|
3291
|
-
newPos: path.newPos,
|
3292
|
-
components: path.components.slice(0)
|
3293
|
-
};
|
3294
|
-
}
|
3295
|
-
|
3296
3318
|
var lib_characterDiff = new lib_Diff();
|
3297
3319
|
function lib_diffChars(oldStr, newStr, options) {
|
3298
3320
|
return lib_characterDiff.diff(oldStr, newStr, options);
|
3299
3321
|
}
|
3300
3322
|
|
3301
|
-
function
|
3302
|
-
|
3303
|
-
|
3304
|
-
|
3305
|
-
|
3306
|
-
/* istanbul ignore else */
|
3307
|
-
if (options.hasOwnProperty(name)) {
|
3308
|
-
defaults[name] = options[name];
|
3309
|
-
}
|
3323
|
+
function longestCommonPrefix(str1, str2) {
|
3324
|
+
var i;
|
3325
|
+
for (i = 0; i < str1.length && i < str2.length; i++) {
|
3326
|
+
if (str1[i] != str2[i]) {
|
3327
|
+
return str1.slice(0, i);
|
3310
3328
|
}
|
3311
3329
|
}
|
3330
|
+
return str1.slice(0, i);
|
3331
|
+
}
|
3332
|
+
function longestCommonSuffix(str1, str2) {
|
3333
|
+
var i;
|
3312
3334
|
|
3313
|
-
|
3335
|
+
// Unlike longestCommonPrefix, we need a special case to handle all scenarios
|
3336
|
+
// where we return the empty string since str1.slice(-0) will return the
|
3337
|
+
// entire string.
|
3338
|
+
if (!str1 || !str2 || str1[str1.length - 1] != str2[str2.length - 1]) {
|
3339
|
+
return '';
|
3340
|
+
}
|
3341
|
+
for (i = 0; i < str1.length && i < str2.length; i++) {
|
3342
|
+
if (str1[str1.length - (i + 1)] != str2[str2.length - (i + 1)]) {
|
3343
|
+
return str1.slice(-i);
|
3344
|
+
}
|
3345
|
+
}
|
3346
|
+
return str1.slice(-i);
|
3347
|
+
}
|
3348
|
+
function replacePrefix(string, oldPrefix, newPrefix) {
|
3349
|
+
if (string.slice(0, oldPrefix.length) != oldPrefix) {
|
3350
|
+
throw Error("string ".concat(JSON.stringify(string), " doesn't start with prefix ").concat(JSON.stringify(oldPrefix), "; this is a bug"));
|
3351
|
+
}
|
3352
|
+
return newPrefix + string.slice(oldPrefix.length);
|
3353
|
+
}
|
3354
|
+
function replaceSuffix(string, oldSuffix, newSuffix) {
|
3355
|
+
if (!oldSuffix) {
|
3356
|
+
return string + newSuffix;
|
3357
|
+
}
|
3358
|
+
if (string.slice(-oldSuffix.length) != oldSuffix) {
|
3359
|
+
throw Error("string ".concat(JSON.stringify(string), " doesn't end with suffix ").concat(JSON.stringify(oldSuffix), "; this is a bug"));
|
3360
|
+
}
|
3361
|
+
return string.slice(0, -oldSuffix.length) + newSuffix;
|
3362
|
+
}
|
3363
|
+
function removePrefix(string, oldPrefix) {
|
3364
|
+
return replacePrefix(string, oldPrefix, '');
|
3365
|
+
}
|
3366
|
+
function removeSuffix(string, oldSuffix) {
|
3367
|
+
return replaceSuffix(string, oldSuffix, '');
|
3368
|
+
}
|
3369
|
+
function maximumOverlap(string1, string2) {
|
3370
|
+
return string2.slice(0, overlapCount(string1, string2));
|
3371
|
+
}
|
3372
|
+
|
3373
|
+
// Nicked from https://stackoverflow.com/a/60422853/1709587
|
3374
|
+
function overlapCount(a, b) {
|
3375
|
+
// Deal with cases where the strings differ in length
|
3376
|
+
var startA = 0;
|
3377
|
+
if (a.length > b.length) {
|
3378
|
+
startA = a.length - b.length;
|
3379
|
+
}
|
3380
|
+
var endB = b.length;
|
3381
|
+
if (a.length < b.length) {
|
3382
|
+
endB = a.length;
|
3383
|
+
}
|
3384
|
+
// Create a back-reference for each index
|
3385
|
+
// that should be followed in case of a mismatch.
|
3386
|
+
// We only need B to make these references:
|
3387
|
+
var map = Array(endB);
|
3388
|
+
var k = 0; // Index that lags behind j
|
3389
|
+
map[0] = 0;
|
3390
|
+
for (var j = 1; j < endB; j++) {
|
3391
|
+
if (b[j] == b[k]) {
|
3392
|
+
map[j] = map[k]; // skip over the same character (optional optimisation)
|
3393
|
+
} else {
|
3394
|
+
map[j] = k;
|
3395
|
+
}
|
3396
|
+
while (k > 0 && b[j] != b[k]) {
|
3397
|
+
k = map[k];
|
3398
|
+
}
|
3399
|
+
if (b[j] == b[k]) {
|
3400
|
+
k++;
|
3401
|
+
}
|
3402
|
+
}
|
3403
|
+
// Phase 2: use these references while iterating over A
|
3404
|
+
k = 0;
|
3405
|
+
for (var i = startA; i < a.length; i++) {
|
3406
|
+
while (k > 0 && a[i] != b[k]) {
|
3407
|
+
k = map[k];
|
3408
|
+
}
|
3409
|
+
if (a[i] == b[k]) {
|
3410
|
+
k++;
|
3411
|
+
}
|
3412
|
+
}
|
3413
|
+
return k;
|
3414
|
+
}
|
3415
|
+
|
3416
|
+
/**
|
3417
|
+
* Returns true if the string consistently uses Windows line endings.
|
3418
|
+
*/
|
3419
|
+
function hasOnlyWinLineEndings(string) {
|
3420
|
+
return string.includes('\r\n') && !string.startsWith('\n') && !string.match(/[^\r]\n/);
|
3421
|
+
}
|
3422
|
+
|
3423
|
+
/**
|
3424
|
+
* Returns true if the string consistently uses Unix line endings.
|
3425
|
+
*/
|
3426
|
+
function hasOnlyUnixLineEndings(string) {
|
3427
|
+
return !string.includes('\r\n') && string.includes('\n');
|
3314
3428
|
}
|
3315
3429
|
|
3430
|
+
// Based on https://en.wikipedia.org/wiki/Latin_script_in_Unicode
|
3316
3431
|
//
|
3317
3432
|
// Ranges and exceptions:
|
3318
3433
|
// Latin-1 Supplement, 0080–00FF
|
@@ -3330,77 +3445,330 @@ function lib_generateOptions(options, defaults) {
|
|
3330
3445
|
// - U+02DC ˜ ˜ Small Tilde
|
3331
3446
|
// - U+02DD ˝ ˝ Double Acute Accent
|
3332
3447
|
// Latin Extended Additional, 1E00–1EFF
|
3448
|
+
var lib_extendedWordChars = "a-zA-Z0-9_\\u{C0}-\\u{FF}\\u{D8}-\\u{F6}\\u{F8}-\\u{2C6}\\u{2C8}-\\u{2D7}\\u{2DE}-\\u{2FF}\\u{1E00}-\\u{1EFF}";
|
3333
3449
|
|
3334
|
-
|
3335
|
-
|
3450
|
+
// Each token is one of the following:
|
3451
|
+
// - A punctuation mark plus the surrounding whitespace
|
3452
|
+
// - A word plus the surrounding whitespace
|
3453
|
+
// - Pure whitespace (but only in the special case where this the entire text
|
3454
|
+
// is just whitespace)
|
3455
|
+
//
|
3456
|
+
// We have to include surrounding whitespace in the tokens because the two
|
3457
|
+
// alternative approaches produce horribly broken results:
|
3458
|
+
// * If we just discard the whitespace, we can't fully reproduce the original
|
3459
|
+
// text from the sequence of tokens and any attempt to render the diff will
|
3460
|
+
// get the whitespace wrong.
|
3461
|
+
// * If we have separate tokens for whitespace, then in a typical text every
|
3462
|
+
// second token will be a single space character. But this often results in
|
3463
|
+
// the optimal diff between two texts being a perverse one that preserves
|
3464
|
+
// the spaces between words but deletes and reinserts actual common words.
|
3465
|
+
// See https://github.com/kpdecker/jsdiff/issues/160#issuecomment-1866099640
|
3466
|
+
// for an example.
|
3467
|
+
//
|
3468
|
+
// Keeping the surrounding whitespace of course has implications for .equals
|
3469
|
+
// and .join, not just .tokenize.
|
3470
|
+
|
3471
|
+
// This regex does NOT fully implement the tokenization rules described above.
|
3472
|
+
// Instead, it gives runs of whitespace their own "token". The tokenize method
|
3473
|
+
// then handles stitching whitespace tokens onto adjacent word or punctuation
|
3474
|
+
// tokens.
|
3475
|
+
var tokenizeIncludingWhitespace = new RegExp("[".concat(lib_extendedWordChars, "]+|\\s+|[^").concat(lib_extendedWordChars, "]"), 'ug');
|
3336
3476
|
var lib_wordDiff = new lib_Diff();
|
3337
|
-
|
3338
|
-
|
3339
|
-
if (this.options.ignoreCase) {
|
3477
|
+
lib_wordDiff.equals = function (left, right, options) {
|
3478
|
+
if (options.ignoreCase) {
|
3340
3479
|
left = left.toLowerCase();
|
3341
3480
|
right = right.toLowerCase();
|
3342
3481
|
}
|
3343
|
-
|
3344
|
-
return left === right || this.options.ignoreWhitespace && !lib_reWhitespace.test(left) && !lib_reWhitespace.test(right);
|
3482
|
+
return left.trim() === right.trim();
|
3345
3483
|
};
|
3346
|
-
|
3347
3484
|
lib_wordDiff.tokenize = function (value) {
|
3348
|
-
|
3349
|
-
var
|
3350
|
-
|
3351
|
-
|
3352
|
-
|
3353
|
-
if (!tokens[i + 1] && tokens[i + 2] && lib_extendedWordChars.test(tokens[i]) && lib_extendedWordChars.test(tokens[i + 2])) {
|
3354
|
-
tokens[i] += tokens[i + 2];
|
3355
|
-
tokens.splice(i + 1, 2);
|
3356
|
-
i--;
|
3485
|
+
var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {};
|
3486
|
+
var parts;
|
3487
|
+
if (options.intlSegmenter) {
|
3488
|
+
if (options.intlSegmenter.resolvedOptions().granularity != 'word') {
|
3489
|
+
throw new Error('The segmenter passed must have a granularity of "word"');
|
3357
3490
|
}
|
3491
|
+
parts = Array.from(options.intlSegmenter.segment(value), function (segment) {
|
3492
|
+
return segment.segment;
|
3493
|
+
});
|
3494
|
+
} else {
|
3495
|
+
parts = value.match(tokenizeIncludingWhitespace) || [];
|
3358
3496
|
}
|
3359
|
-
|
3497
|
+
var tokens = [];
|
3498
|
+
var prevPart = null;
|
3499
|
+
parts.forEach(function (part) {
|
3500
|
+
if (/\s/.test(part)) {
|
3501
|
+
if (prevPart == null) {
|
3502
|
+
tokens.push(part);
|
3503
|
+
} else {
|
3504
|
+
tokens.push(tokens.pop() + part);
|
3505
|
+
}
|
3506
|
+
} else if (/\s/.test(prevPart)) {
|
3507
|
+
if (tokens[tokens.length - 1] == prevPart) {
|
3508
|
+
tokens.push(tokens.pop() + part);
|
3509
|
+
} else {
|
3510
|
+
tokens.push(prevPart + part);
|
3511
|
+
}
|
3512
|
+
} else {
|
3513
|
+
tokens.push(part);
|
3514
|
+
}
|
3515
|
+
prevPart = part;
|
3516
|
+
});
|
3360
3517
|
return tokens;
|
3361
3518
|
};
|
3362
|
-
|
3363
|
-
|
3364
|
-
|
3365
|
-
|
3519
|
+
lib_wordDiff.join = function (tokens) {
|
3520
|
+
// Tokens being joined here will always have appeared consecutively in the
|
3521
|
+
// same text, so we can simply strip off the leading whitespace from all the
|
3522
|
+
// tokens except the first (and except any whitespace-only tokens - but such
|
3523
|
+
// a token will always be the first and only token anyway) and then join them
|
3524
|
+
// and the whitespace around words and punctuation will end up correct.
|
3525
|
+
return tokens.map(function (token, i) {
|
3526
|
+
if (i == 0) {
|
3527
|
+
return token;
|
3528
|
+
} else {
|
3529
|
+
return token.replace(/^\s+/, '');
|
3530
|
+
}
|
3531
|
+
}).join('');
|
3532
|
+
};
|
3533
|
+
lib_wordDiff.postProcess = function (changes, options) {
|
3534
|
+
if (!changes || options.oneChangePerToken) {
|
3535
|
+
return changes;
|
3536
|
+
}
|
3537
|
+
var lastKeep = null;
|
3538
|
+
// Change objects representing any insertion or deletion since the last
|
3539
|
+
// "keep" change object. There can be at most one of each.
|
3540
|
+
var insertion = null;
|
3541
|
+
var deletion = null;
|
3542
|
+
changes.forEach(function (change) {
|
3543
|
+
if (change.added) {
|
3544
|
+
insertion = change;
|
3545
|
+
} else if (change.removed) {
|
3546
|
+
deletion = change;
|
3547
|
+
} else {
|
3548
|
+
if (insertion || deletion) {
|
3549
|
+
// May be false at start of text
|
3550
|
+
dedupeWhitespaceInChangeObjects(lastKeep, deletion, insertion, change);
|
3551
|
+
}
|
3552
|
+
lastKeep = change;
|
3553
|
+
insertion = null;
|
3554
|
+
deletion = null;
|
3555
|
+
}
|
3366
3556
|
});
|
3557
|
+
if (insertion || deletion) {
|
3558
|
+
dedupeWhitespaceInChangeObjects(lastKeep, deletion, insertion, null);
|
3559
|
+
}
|
3560
|
+
return changes;
|
3561
|
+
};
|
3562
|
+
function lib_diffWords(oldStr, newStr, options) {
|
3563
|
+
// This option has never been documented and never will be (it's clearer to
|
3564
|
+
// just call `diffWordsWithSpace` directly if you need that behavior), but
|
3565
|
+
// has existed in jsdiff for a long time, so we retain support for it here
|
3566
|
+
// for the sake of backwards compatibility.
|
3567
|
+
if ((options === null || options === void 0 ? void 0 : options.ignoreWhitespace) != null && !options.ignoreWhitespace) {
|
3568
|
+
return lib_diffWordsWithSpace(oldStr, newStr, options);
|
3569
|
+
}
|
3367
3570
|
return lib_wordDiff.diff(oldStr, newStr, options);
|
3368
3571
|
}
|
3572
|
+
function dedupeWhitespaceInChangeObjects(startKeep, deletion, insertion, endKeep) {
|
3573
|
+
// Before returning, we tidy up the leading and trailing whitespace of the
|
3574
|
+
// change objects to eliminate cases where trailing whitespace in one object
|
3575
|
+
// is repeated as leading whitespace in the next.
|
3576
|
+
// Below are examples of the outcomes we want here to explain the code.
|
3577
|
+
// I=insert, K=keep, D=delete
|
3578
|
+
// 1. diffing 'foo bar baz' vs 'foo baz'
|
3579
|
+
// Prior to cleanup, we have K:'foo ' D:' bar ' K:' baz'
|
3580
|
+
// After cleanup, we want: K:'foo ' D:'bar ' K:'baz'
|
3581
|
+
//
|
3582
|
+
// 2. Diffing 'foo bar baz' vs 'foo qux baz'
|
3583
|
+
// Prior to cleanup, we have K:'foo ' D:' bar ' I:' qux ' K:' baz'
|
3584
|
+
// After cleanup, we want K:'foo ' D:'bar' I:'qux' K:' baz'
|
3585
|
+
//
|
3586
|
+
// 3. Diffing 'foo\nbar baz' vs 'foo baz'
|
3587
|
+
// Prior to cleanup, we have K:'foo ' D:'\nbar ' K:' baz'
|
3588
|
+
// After cleanup, we want K'foo' D:'\nbar' K:' baz'
|
3589
|
+
//
|
3590
|
+
// 4. Diffing 'foo baz' vs 'foo\nbar baz'
|
3591
|
+
// Prior to cleanup, we have K:'foo\n' I:'\nbar ' K:' baz'
|
3592
|
+
// After cleanup, we ideally want K'foo' I:'\nbar' K:' baz'
|
3593
|
+
// but don't actually manage this currently (the pre-cleanup change
|
3594
|
+
// objects don't contain enough information to make it possible).
|
3595
|
+
//
|
3596
|
+
// 5. Diffing 'foo bar baz' vs 'foo baz'
|
3597
|
+
// Prior to cleanup, we have K:'foo ' D:' bar ' K:' baz'
|
3598
|
+
// After cleanup, we want K:'foo ' D:' bar ' K:'baz'
|
3599
|
+
//
|
3600
|
+
// Our handling is unavoidably imperfect in the case where there's a single
|
3601
|
+
// indel between keeps and the whitespace has changed. For instance, consider
|
3602
|
+
// diffing 'foo\tbar\nbaz' vs 'foo baz'. Unless we create an extra change
|
3603
|
+
// object to represent the insertion of the space character (which isn't even
|
3604
|
+
// a token), we have no way to avoid losing information about the texts'
|
3605
|
+
// original whitespace in the result we return. Still, we do our best to
|
3606
|
+
// output something that will look sensible if we e.g. print it with
|
3607
|
+
// insertions in green and deletions in red.
|
3608
|
+
|
3609
|
+
// Between two "keep" change objects (or before the first or after the last
|
3610
|
+
// change object), we can have either:
|
3611
|
+
// * A "delete" followed by an "insert"
|
3612
|
+
// * Just an "insert"
|
3613
|
+
// * Just a "delete"
|
3614
|
+
// We handle the three cases separately.
|
3615
|
+
if (deletion && insertion) {
|
3616
|
+
var oldWsPrefix = deletion.value.match(/^\s*/)[0];
|
3617
|
+
var oldWsSuffix = deletion.value.match(/\s*$/)[0];
|
3618
|
+
var newWsPrefix = insertion.value.match(/^\s*/)[0];
|
3619
|
+
var newWsSuffix = insertion.value.match(/\s*$/)[0];
|
3620
|
+
if (startKeep) {
|
3621
|
+
var commonWsPrefix = longestCommonPrefix(oldWsPrefix, newWsPrefix);
|
3622
|
+
startKeep.value = replaceSuffix(startKeep.value, newWsPrefix, commonWsPrefix);
|
3623
|
+
deletion.value = removePrefix(deletion.value, commonWsPrefix);
|
3624
|
+
insertion.value = removePrefix(insertion.value, commonWsPrefix);
|
3625
|
+
}
|
3626
|
+
if (endKeep) {
|
3627
|
+
var commonWsSuffix = longestCommonSuffix(oldWsSuffix, newWsSuffix);
|
3628
|
+
endKeep.value = replacePrefix(endKeep.value, newWsSuffix, commonWsSuffix);
|
3629
|
+
deletion.value = removeSuffix(deletion.value, commonWsSuffix);
|
3630
|
+
insertion.value = removeSuffix(insertion.value, commonWsSuffix);
|
3631
|
+
}
|
3632
|
+
} else if (insertion) {
|
3633
|
+
// The whitespaces all reflect what was in the new text rather than
|
3634
|
+
// the old, so we essentially have no information about whitespace
|
3635
|
+
// insertion or deletion. We just want to dedupe the whitespace.
|
3636
|
+
// We do that by having each change object keep its trailing
|
3637
|
+
// whitespace and deleting duplicate leading whitespace where
|
3638
|
+
// present.
|
3639
|
+
if (startKeep) {
|
3640
|
+
insertion.value = insertion.value.replace(/^\s*/, '');
|
3641
|
+
}
|
3642
|
+
if (endKeep) {
|
3643
|
+
endKeep.value = endKeep.value.replace(/^\s*/, '');
|
3644
|
+
}
|
3645
|
+
// otherwise we've got a deletion and no insertion
|
3646
|
+
} else if (startKeep && endKeep) {
|
3647
|
+
var newWsFull = endKeep.value.match(/^\s*/)[0],
|
3648
|
+
delWsStart = deletion.value.match(/^\s*/)[0],
|
3649
|
+
delWsEnd = deletion.value.match(/\s*$/)[0];
|
3650
|
+
|
3651
|
+
// Any whitespace that comes straight after startKeep in both the old and
|
3652
|
+
// new texts, assign to startKeep and remove from the deletion.
|
3653
|
+
var newWsStart = longestCommonPrefix(newWsFull, delWsStart);
|
3654
|
+
deletion.value = removePrefix(deletion.value, newWsStart);
|
3655
|
+
|
3656
|
+
// Any whitespace that comes straight before endKeep in both the old and
|
3657
|
+
// new texts, and hasn't already been assigned to startKeep, assign to
|
3658
|
+
// endKeep and remove from the deletion.
|
3659
|
+
var newWsEnd = longestCommonSuffix(removePrefix(newWsFull, newWsStart), delWsEnd);
|
3660
|
+
deletion.value = removeSuffix(deletion.value, newWsEnd);
|
3661
|
+
endKeep.value = replacePrefix(endKeep.value, newWsFull, newWsEnd);
|
3662
|
+
|
3663
|
+
// If there's any whitespace from the new text that HASN'T already been
|
3664
|
+
// assigned, assign it to the start:
|
3665
|
+
startKeep.value = replaceSuffix(startKeep.value, newWsFull, newWsFull.slice(0, newWsFull.length - newWsEnd.length));
|
3666
|
+
} else if (endKeep) {
|
3667
|
+
// We are at the start of the text. Preserve all the whitespace on
|
3668
|
+
// endKeep, and just remove whitespace from the end of deletion to the
|
3669
|
+
// extent that it overlaps with the start of endKeep.
|
3670
|
+
var endKeepWsPrefix = endKeep.value.match(/^\s*/)[0];
|
3671
|
+
var deletionWsSuffix = deletion.value.match(/\s*$/)[0];
|
3672
|
+
var overlap = maximumOverlap(deletionWsSuffix, endKeepWsPrefix);
|
3673
|
+
deletion.value = removeSuffix(deletion.value, overlap);
|
3674
|
+
} else if (startKeep) {
|
3675
|
+
// We are at the END of the text. Preserve all the whitespace on
|
3676
|
+
// startKeep, and just remove whitespace from the start of deletion to
|
3677
|
+
// the extent that it overlaps with the end of startKeep.
|
3678
|
+
var startKeepWsSuffix = startKeep.value.match(/\s*$/)[0];
|
3679
|
+
var deletionWsPrefix = deletion.value.match(/^\s*/)[0];
|
3680
|
+
var _overlap = maximumOverlap(startKeepWsSuffix, deletionWsPrefix);
|
3681
|
+
deletion.value = removePrefix(deletion.value, _overlap);
|
3682
|
+
}
|
3683
|
+
}
|
3684
|
+
var wordWithSpaceDiff = new lib_Diff();
|
3685
|
+
wordWithSpaceDiff.tokenize = function (value) {
|
3686
|
+
// Slightly different to the tokenizeIncludingWhitespace regex used above in
|
3687
|
+
// that this one treats each individual newline as a distinct tokens, rather
|
3688
|
+
// than merging them into other surrounding whitespace. This was requested
|
3689
|
+
// in https://github.com/kpdecker/jsdiff/issues/180 &
|
3690
|
+
// https://github.com/kpdecker/jsdiff/issues/211
|
3691
|
+
var regex = new RegExp("(\\r?\\n)|[".concat(lib_extendedWordChars, "]+|[^\\S\\n\\r]+|[^").concat(lib_extendedWordChars, "]"), 'ug');
|
3692
|
+
return value.match(regex) || [];
|
3693
|
+
};
|
3369
3694
|
function lib_diffWordsWithSpace(oldStr, newStr, options) {
|
3370
|
-
return
|
3695
|
+
return wordWithSpaceDiff.diff(oldStr, newStr, options);
|
3371
3696
|
}
|
3372
3697
|
|
3373
|
-
|
3374
|
-
|
3375
|
-
|
3698
|
+
function lib_generateOptions(options, defaults) {
|
3699
|
+
if (typeof options === 'function') {
|
3700
|
+
defaults.callback = options;
|
3701
|
+
} else if (options) {
|
3702
|
+
for (var name in options) {
|
3703
|
+
/* istanbul ignore else */
|
3704
|
+
if (options.hasOwnProperty(name)) {
|
3705
|
+
defaults[name] = options[name];
|
3706
|
+
}
|
3707
|
+
}
|
3708
|
+
}
|
3709
|
+
return defaults;
|
3710
|
+
}
|
3711
|
+
|
3712
|
+
var lib_lineDiff = new lib_Diff();
|
3713
|
+
lib_lineDiff.tokenize = function (value, options) {
|
3714
|
+
if (options.stripTrailingCr) {
|
3715
|
+
// remove one \r before \n to match GNU diff's --strip-trailing-cr behavior
|
3716
|
+
value = value.replace(/\r\n/g, '\n');
|
3717
|
+
}
|
3376
3718
|
var retLines = [],
|
3377
|
-
|
3719
|
+
linesAndNewlines = value.split(/(\n|\r\n)/);
|
3378
3720
|
|
3721
|
+
// Ignore the final empty token that occurs if the string ends with a new line
|
3379
3722
|
if (!linesAndNewlines[linesAndNewlines.length - 1]) {
|
3380
3723
|
linesAndNewlines.pop();
|
3381
|
-
}
|
3382
|
-
|
3724
|
+
}
|
3383
3725
|
|
3726
|
+
// Merge the content and line separators into single tokens
|
3384
3727
|
for (var i = 0; i < linesAndNewlines.length; i++) {
|
3385
3728
|
var line = linesAndNewlines[i];
|
3386
|
-
|
3387
|
-
if (i % 2 && !this.options.newlineIsToken) {
|
3729
|
+
if (i % 2 && !options.newlineIsToken) {
|
3388
3730
|
retLines[retLines.length - 1] += line;
|
3389
3731
|
} else {
|
3390
|
-
if (this.options.ignoreWhitespace) {
|
3391
|
-
line = line.trim();
|
3392
|
-
}
|
3393
|
-
|
3394
3732
|
retLines.push(line);
|
3395
3733
|
}
|
3396
3734
|
}
|
3397
|
-
|
3398
3735
|
return retLines;
|
3399
3736
|
};
|
3400
|
-
|
3737
|
+
lib_lineDiff.equals = function (left, right, options) {
|
3738
|
+
// If we're ignoring whitespace, we need to normalise lines by stripping
|
3739
|
+
// whitespace before checking equality. (This has an annoying interaction
|
3740
|
+
// with newlineIsToken that requires special handling: if newlines get their
|
3741
|
+
// own token, then we DON'T want to trim the *newline* tokens down to empty
|
3742
|
+
// strings, since this would cause us to treat whitespace-only line content
|
3743
|
+
// as equal to a separator between lines, which would be weird and
|
3744
|
+
// inconsistent with the documented behavior of the options.)
|
3745
|
+
if (options.ignoreWhitespace) {
|
3746
|
+
if (!options.newlineIsToken || !left.includes('\n')) {
|
3747
|
+
left = left.trim();
|
3748
|
+
}
|
3749
|
+
if (!options.newlineIsToken || !right.includes('\n')) {
|
3750
|
+
right = right.trim();
|
3751
|
+
}
|
3752
|
+
} else if (options.ignoreNewlineAtEof && !options.newlineIsToken) {
|
3753
|
+
if (left.endsWith('\n')) {
|
3754
|
+
left = left.slice(0, -1);
|
3755
|
+
}
|
3756
|
+
if (right.endsWith('\n')) {
|
3757
|
+
right = right.slice(0, -1);
|
3758
|
+
}
|
3759
|
+
}
|
3760
|
+
return lib_Diff.prototype.equals.call(this, left, right, options);
|
3761
|
+
};
|
3401
3762
|
function lib_diffLines(oldStr, newStr, callback) {
|
3402
3763
|
return lib_lineDiff.diff(oldStr, newStr, callback);
|
3403
3764
|
}
|
3765
|
+
|
3766
|
+
// Kept for backwards compatibility. This is a rather arbitrary wrapper method
|
3767
|
+
// that just calls `diffLines` with `ignoreWhitespace: true`. It's confusing to
|
3768
|
+
// have two ways to do exactly the same thing in the API, so we no longer
|
3769
|
+
// document this one (library users should explicitly use `diffLines` with
|
3770
|
+
// `ignoreWhitespace: true` instead) but we keep it around to maintain
|
3771
|
+
// compatibility with code that used old versions.
|
3404
3772
|
function lib_diffTrimmedLines(oldStr, newStr, callback) {
|
3405
3773
|
var options = lib_generateOptions(callback, {
|
3406
3774
|
ignoreWhitespace: true
|
@@ -3409,53 +3777,88 @@ function lib_diffTrimmedLines(oldStr, newStr, callback) {
|
|
3409
3777
|
}
|
3410
3778
|
|
3411
3779
|
var lib_sentenceDiff = new lib_Diff();
|
3412
|
-
|
3413
3780
|
lib_sentenceDiff.tokenize = function (value) {
|
3414
3781
|
return value.split(/(\S.+?[.!?])(?=\s+|$)/);
|
3415
3782
|
};
|
3416
|
-
|
3417
3783
|
function lib_diffSentences(oldStr, newStr, callback) {
|
3418
3784
|
return lib_sentenceDiff.diff(oldStr, newStr, callback);
|
3419
3785
|
}
|
3420
3786
|
|
3421
3787
|
var lib_cssDiff = new lib_Diff();
|
3422
|
-
|
3423
3788
|
lib_cssDiff.tokenize = function (value) {
|
3424
3789
|
return value.split(/([{}:;,]|\s+)/);
|
3425
3790
|
};
|
3426
|
-
|
3427
3791
|
function lib_diffCss(oldStr, newStr, callback) {
|
3428
3792
|
return lib_cssDiff.diff(oldStr, newStr, callback);
|
3429
3793
|
}
|
3430
3794
|
|
3431
|
-
function
|
3795
|
+
function ownKeys(e, r) {
|
3796
|
+
var t = Object.keys(e);
|
3797
|
+
if (Object.getOwnPropertySymbols) {
|
3798
|
+
var o = Object.getOwnPropertySymbols(e);
|
3799
|
+
r && (o = o.filter(function (r) {
|
3800
|
+
return Object.getOwnPropertyDescriptor(e, r).enumerable;
|
3801
|
+
})), t.push.apply(t, o);
|
3802
|
+
}
|
3803
|
+
return t;
|
3804
|
+
}
|
3805
|
+
function _objectSpread2(e) {
|
3806
|
+
for (var r = 1; r < arguments.length; r++) {
|
3807
|
+
var t = null != arguments[r] ? arguments[r] : {};
|
3808
|
+
r % 2 ? ownKeys(Object(t), !0).forEach(function (r) {
|
3809
|
+
_defineProperty(e, r, t[r]);
|
3810
|
+
}) : Object.getOwnPropertyDescriptors ? Object.defineProperties(e, Object.getOwnPropertyDescriptors(t)) : ownKeys(Object(t)).forEach(function (r) {
|
3811
|
+
Object.defineProperty(e, r, Object.getOwnPropertyDescriptor(t, r));
|
3812
|
+
});
|
3813
|
+
}
|
3814
|
+
return e;
|
3815
|
+
}
|
3816
|
+
function _toPrimitive(t, r) {
|
3817
|
+
if ("object" != typeof t || !t) return t;
|
3818
|
+
var e = t[Symbol.toPrimitive];
|
3819
|
+
if (void 0 !== e) {
|
3820
|
+
var i = e.call(t, r || "default");
|
3821
|
+
if ("object" != typeof i) return i;
|
3822
|
+
throw new TypeError("@@toPrimitive must return a primitive value.");
|
3823
|
+
}
|
3824
|
+
return ("string" === r ? String : Number)(t);
|
3825
|
+
}
|
3826
|
+
function _toPropertyKey(t) {
|
3827
|
+
var i = _toPrimitive(t, "string");
|
3828
|
+
return "symbol" == typeof i ? i : i + "";
|
3829
|
+
}
|
3830
|
+
function lib_typeof(o) {
|
3432
3831
|
"@babel/helpers - typeof";
|
3433
3832
|
|
3434
|
-
|
3435
|
-
|
3436
|
-
|
3437
|
-
|
3833
|
+
return lib_typeof = "function" == typeof Symbol && "symbol" == typeof Symbol.iterator ? function (o) {
|
3834
|
+
return typeof o;
|
3835
|
+
} : function (o) {
|
3836
|
+
return o && "function" == typeof Symbol && o.constructor === Symbol && o !== Symbol.prototype ? "symbol" : typeof o;
|
3837
|
+
}, lib_typeof(o);
|
3838
|
+
}
|
3839
|
+
function _defineProperty(obj, key, value) {
|
3840
|
+
key = _toPropertyKey(key);
|
3841
|
+
if (key in obj) {
|
3842
|
+
Object.defineProperty(obj, key, {
|
3843
|
+
value: value,
|
3844
|
+
enumerable: true,
|
3845
|
+
configurable: true,
|
3846
|
+
writable: true
|
3847
|
+
});
|
3438
3848
|
} else {
|
3439
|
-
|
3440
|
-
return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj;
|
3441
|
-
};
|
3849
|
+
obj[key] = value;
|
3442
3850
|
}
|
3443
|
-
|
3444
|
-
return lib_typeof(obj);
|
3851
|
+
return obj;
|
3445
3852
|
}
|
3446
|
-
|
3447
3853
|
function lib_toConsumableArray(arr) {
|
3448
3854
|
return lib_arrayWithoutHoles(arr) || lib_iterableToArray(arr) || diff_lib_unsupportedIterableToArray(arr) || lib_nonIterableSpread();
|
3449
3855
|
}
|
3450
|
-
|
3451
3856
|
function lib_arrayWithoutHoles(arr) {
|
3452
3857
|
if (Array.isArray(arr)) return diff_lib_arrayLikeToArray(arr);
|
3453
3858
|
}
|
3454
|
-
|
3455
3859
|
function lib_iterableToArray(iter) {
|
3456
|
-
if (typeof Symbol !== "undefined" && Symbol.iterator
|
3860
|
+
if (typeof Symbol !== "undefined" && iter[Symbol.iterator] != null || iter["@@iterator"] != null) return Array.from(iter);
|
3457
3861
|
}
|
3458
|
-
|
3459
3862
|
 function diff_lib_unsupportedIterableToArray(o, minLen) {
   if (!o) return;
   if (typeof o === "string") return diff_lib_arrayLikeToArray(o, minLen);
@@ -3464,238 +3867,263 @@ function diff_lib_unsupportedIterableToArray(o, minLen) {
   if (n === "Map" || n === "Set") return Array.from(o);
   if (n === "Arguments" || /^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)) return diff_lib_arrayLikeToArray(o, minLen);
 }
-
 function diff_lib_arrayLikeToArray(arr, len) {
   if (len == null || len > arr.length) len = arr.length;
-
   for (var i = 0, arr2 = new Array(len); i < len; i++) arr2[i] = arr[i];
-
   return arr2;
 }
-
 function lib_nonIterableSpread() {
   throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.");
 }
 
-var
-
+var lib_jsonDiff = new lib_Diff();
+// Discriminate between two lines of pretty-printed, serialized JSON where one of them has a
 // dangling comma and the other doesn't. Turns out including the dangling comma yields the nicest output:
-
 lib_jsonDiff.useLongestToken = true;
 lib_jsonDiff.tokenize = lib_lineDiff.tokenize;
-
-
-
-
-
-
-    return typeof v === 'undefined' ? undefinedReplacement : v;
-  } : _this$options$stringi;
+lib_jsonDiff.castInput = function (value, options) {
+  var undefinedReplacement = options.undefinedReplacement,
+    _options$stringifyRep = options.stringifyReplacer,
+    stringifyReplacer = _options$stringifyRep === void 0 ? function (k, v) {
+      return typeof v === 'undefined' ? undefinedReplacement : v;
+    } : _options$stringifyRep;
   return typeof value === 'string' ? value : JSON.stringify(lib_canonicalize(value, null, null, stringifyReplacer), stringifyReplacer, '  ');
 };
-
-
-  return lib_Diff.prototype.equals.call(lib_jsonDiff, left.replace(/,([\r\n])/g, '$1'), right.replace(/,([\r\n])/g, '$1'));
+lib_jsonDiff.equals = function (left, right, options) {
+  return lib_Diff.prototype.equals.call(lib_jsonDiff, left.replace(/,([\r\n])/g, '$1'), right.replace(/,([\r\n])/g, '$1'), options);
 };
-
 function lib_diffJson(oldObj, newObj, options) {
   return lib_jsonDiff.diff(oldObj, newObj, options);
-}
-// object that is already on the "stack" of items being processed. Accepts an optional replacer
+}
 
+// This function handles the presence of circular references by bailing out when encountering an
+// object that is already on the "stack" of items being processed. Accepts an optional replacer
 function lib_canonicalize(obj, stack, replacementStack, replacer, key) {
   stack = stack || [];
   replacementStack = replacementStack || [];
-
   if (replacer) {
     obj = replacer(key, obj);
   }
-
   var i;
-
   for (i = 0; i < stack.length; i += 1) {
     if (stack[i] === obj) {
       return replacementStack[i];
     }
   }
-
   var canonicalizedObj;
-
-  if ('[object Array]' === lib_objectPrototypeToString.call(obj)) {
+  if ('[object Array]' === Object.prototype.toString.call(obj)) {
     stack.push(obj);
     canonicalizedObj = new Array(obj.length);
     replacementStack.push(canonicalizedObj);
-
     for (i = 0; i < obj.length; i += 1) {
       canonicalizedObj[i] = lib_canonicalize(obj[i], stack, replacementStack, replacer, key);
     }
-
     stack.pop();
     replacementStack.pop();
     return canonicalizedObj;
   }
-
   if (obj && obj.toJSON) {
     obj = obj.toJSON();
   }
-
   if (lib_typeof(obj) === 'object' && obj !== null) {
     stack.push(obj);
     canonicalizedObj = {};
     replacementStack.push(canonicalizedObj);
-
     var sortedKeys = [],
-
-
+      _key;
     for (_key in obj) {
       /* istanbul ignore else */
-      if (
+      if (Object.prototype.hasOwnProperty.call(obj, _key)) {
         sortedKeys.push(_key);
       }
     }
-
     sortedKeys.sort();
-
     for (i = 0; i < sortedKeys.length; i += 1) {
       _key = sortedKeys[i];
       canonicalizedObj[_key] = lib_canonicalize(obj[_key], stack, replacementStack, replacer, _key);
     }
-
     stack.pop();
     replacementStack.pop();
   } else {
     canonicalizedObj = obj;
   }
-
   return canonicalizedObj;
 }
 
 var lib_arrayDiff = new lib_Diff();
-
 lib_arrayDiff.tokenize = function (value) {
   return value.slice();
 };
-
 lib_arrayDiff.join = lib_arrayDiff.removeEmpty = function (value) {
   return value;
 };
-
 function lib_diffArrays(oldArr, newArr, callback) {
   return lib_arrayDiff.diff(oldArr, newArr, callback);
 }
 
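The `lib_`-prefixed helpers above are the bundled copy of the jsdiff library. As a minimal usage sketch, the equivalent public `diffJson` export of the standalone `diff` npm package behaves like this (the sample objects are made up); canonicalization sorts object keys, so only real value changes are reported:

```js
const { diffJson } = require('diff');

// Swapping key order alone is not a change after canonicalization;
// only the value of "a" shows up as removed/added.
const parts = diffJson({ b: 1, a: 'old' }, { a: 'new', b: 1 });
parts.forEach(part => {
  const tag = part.added ? 'added' : part.removed ? 'removed' : 'same';
  console.log(tag, JSON.stringify(part.value));
});
```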
-function
-
-
-
-
-
+function unixToWin(patch) {
+  if (Array.isArray(patch)) {
+    return patch.map(unixToWin);
+  }
+  return _objectSpread2(_objectSpread2({}, patch), {}, {
+    hunks: patch.hunks.map(function (hunk) {
+      return _objectSpread2(_objectSpread2({}, hunk), {}, {
+        lines: hunk.lines.map(function (line, i) {
+          var _hunk$lines;
+          return line.startsWith('\\') || line.endsWith('\r') || (_hunk$lines = hunk.lines[i + 1]) !== null && _hunk$lines !== void 0 && _hunk$lines.startsWith('\\') ? line : line + '\r';
+        })
+      });
+    })
+  });
+}
+function winToUnix(patch) {
+  if (Array.isArray(patch)) {
+    return patch.map(winToUnix);
+  }
+  return _objectSpread2(_objectSpread2({}, patch), {}, {
+    hunks: patch.hunks.map(function (hunk) {
+      return _objectSpread2(_objectSpread2({}, hunk), {}, {
+        lines: hunk.lines.map(function (line) {
+          return line.endsWith('\r') ? line.substring(0, line.length - 1) : line;
+        })
+      });
+    })
+  });
+}
+
+/**
+ * Returns true if the patch consistently uses Unix line endings (or only involves one line and has
+ * no line endings).
+ */
+function isUnix(patch) {
+  if (!Array.isArray(patch)) {
+    patch = [patch];
+  }
+  return !patch.some(function (index) {
+    return index.hunks.some(function (hunk) {
+      return hunk.lines.some(function (line) {
+        return !line.startsWith('\\') && line.endsWith('\r');
+      });
+    });
+  });
+}
 
+/**
+ * Returns true if the patch uses Windows line endings and only Windows line endings.
+ */
+function isWin(patch) {
+  if (!Array.isArray(patch)) {
+    patch = [patch];
+  }
+  return patch.some(function (index) {
+    return index.hunks.some(function (hunk) {
+      return hunk.lines.some(function (line) {
+        return line.endsWith('\r');
+      });
+    });
+  }) && patch.every(function (index) {
+    return index.hunks.every(function (hunk) {
+      return hunk.lines.every(function (line, i) {
+        var _hunk$lines2;
+        return line.startsWith('\\') || line.endsWith('\r') || ((_hunk$lines2 = hunk.lines[i + 1]) === null || _hunk$lines2 === void 0 ? void 0 : _hunk$lines2.startsWith('\\'));
+      });
+    });
+  });
+}
+
+function lib_parsePatch(uniDiff) {
+  var diffstr = uniDiff.split(/\n/),
+    list = [],
+    i = 0;
4038
|
function parseIndex() {
|
3595
4039
|
var index = {};
|
3596
|
-
list.push(index);
|
4040
|
+
list.push(index);
|
3597
4041
|
|
4042
|
+
// Parse diff metadata
|
3598
4043
|
while (i < diffstr.length) {
|
3599
|
-
var line = diffstr[i];
|
4044
|
+
var line = diffstr[i];
|
3600
4045
|
|
4046
|
+
// File header found, end parsing diff metadata
|
3601
4047
|
if (/^(\-\-\-|\+\+\+|@@)\s/.test(line)) {
|
3602
4048
|
break;
|
3603
|
-
}
|
3604
|
-
|
4049
|
+
}
|
3605
4050
|
|
4051
|
+
// Diff index
|
3606
4052
|
var header = /^(?:Index:|diff(?: -r \w+)+)\s+(.+?)\s*$/.exec(line);
|
3607
|
-
|
3608
4053
|
if (header) {
|
3609
4054
|
index.index = header[1];
|
3610
4055
|
}
|
3611
|
-
|
3612
4056
|
i++;
|
3613
|
-
}
|
3614
|
-
// there's no technical issues to have an isolated hunk without file header
|
3615
|
-
|
4057
|
+
}
|
3616
4058
|
|
4059
|
+
// Parse file headers if they are defined. Unified diff requires them, but
|
4060
|
+
// there's no technical issues to have an isolated hunk without file header
|
4061
|
+
parseFileHeader(index);
|
3617
4062
|
parseFileHeader(index);
|
3618
|
-
parseFileHeader(index); // Parse hunks
|
3619
4063
|
|
4064
|
+
// Parse hunks
|
3620
4065
|
index.hunks = [];
|
3621
|
-
|
3622
4066
|
while (i < diffstr.length) {
|
3623
4067
|
var _line = diffstr[i];
|
3624
|
-
|
3625
|
-
if (/^(Index:|diff|\-\-\-|\+\+\+)\s/.test(_line)) {
|
4068
|
+
if (/^(Index:\s|diff\s|\-\-\-\s|\+\+\+\s|===================================================================)/.test(_line)) {
|
3626
4069
|
break;
|
3627
4070
|
} else if (/^@@/.test(_line)) {
|
3628
4071
|
index.hunks.push(parseHunk());
|
3629
|
-
} else if (_line
|
3630
|
-
// Ignore unexpected content unless in strict mode
|
4072
|
+
} else if (_line) {
|
3631
4073
|
throw new Error('Unknown line ' + (i + 1) + ' ' + JSON.stringify(_line));
|
3632
4074
|
} else {
|
3633
4075
|
i++;
|
3634
4076
|
}
|
3635
4077
|
}
|
3636
|
-
}
|
3637
|
-
// are consumed.
|
3638
|
-
|
4078
|
+
}
|
3639
4079
|
|
4080
|
+
// Parses the --- and +++ headers, if none are found, no lines
|
4081
|
+
// are consumed.
|
3640
4082
|
function parseFileHeader(index) {
|
3641
|
-
var fileHeader = /^(---|\+\+\+)\s+(.*)
|
3642
|
-
|
4083
|
+
var fileHeader = /^(---|\+\+\+)\s+(.*)\r?$/.exec(diffstr[i]);
|
3643
4084
|
if (fileHeader) {
|
3644
4085
|
var keyPrefix = fileHeader[1] === '---' ? 'old' : 'new';
|
3645
4086
|
var data = fileHeader[2].split('\t', 2);
|
3646
4087
|
var fileName = data[0].replace(/\\\\/g, '\\');
|
3647
|
-
|
3648
4088
|
if (/^".*"$/.test(fileName)) {
|
3649
4089
|
fileName = fileName.substr(1, fileName.length - 2);
|
3650
4090
|
}
|
3651
|
-
|
3652
4091
|
index[keyPrefix + 'FileName'] = fileName;
|
3653
4092
|
index[keyPrefix + 'Header'] = (data[1] || '').trim();
|
3654
4093
|
i++;
|
3655
4094
|
}
|
3656
|
-
}
|
3657
|
-
// This assumes that we are at the start of a hunk.
|
3658
|
-
|
4095
|
+
}
|
3659
4096
|
|
4097
|
+
// Parses a hunk
|
4098
|
+
// This assumes that we are at the start of a hunk.
|
3660
4099
|
function parseHunk() {
|
3661
4100
|
var chunkHeaderIndex = i,
|
3662
|
-
|
3663
|
-
|
4101
|
+
chunkHeaderLine = diffstr[i++],
|
4102
|
+
chunkHeader = chunkHeaderLine.split(/@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@/);
|
3664
4103
|
var hunk = {
|
3665
4104
|
oldStart: +chunkHeader[1],
|
3666
4105
|
oldLines: typeof chunkHeader[2] === 'undefined' ? 1 : +chunkHeader[2],
|
3667
4106
|
newStart: +chunkHeader[3],
|
3668
4107
|
newLines: typeof chunkHeader[4] === 'undefined' ? 1 : +chunkHeader[4],
|
3669
|
-
lines: []
|
3670
|
-
|
3671
|
-
|
4108
|
+
lines: []
|
4109
|
+
};
|
4110
|
+
|
4111
|
+
// Unified Diff Format quirk: If the chunk size is 0,
|
3672
4112
|
// the first number is one lower than one would expect.
|
3673
4113
|
// https://www.artima.com/weblogs/viewpost.jsp?thread=164293
|
3674
|
-
|
3675
4114
|
if (hunk.oldLines === 0) {
|
3676
4115
|
hunk.oldStart += 1;
|
3677
4116
|
}
|
3678
|
-
|
3679
4117
|
if (hunk.newLines === 0) {
|
3680
4118
|
hunk.newStart += 1;
|
3681
4119
|
}
|
3682
|
-
|
3683
4120
|
var addCount = 0,
|
3684
|
-
|
3685
|
-
|
3686
|
-
|
3687
|
-
// Lines starting with '---' could be mistaken for the "remove line" operation
|
3688
|
-
// But they could be the header for the next file. Therefore prune such cases out.
|
3689
|
-
if (diffstr[i].indexOf('--- ') === 0 && i + 2 < diffstr.length && diffstr[i + 1].indexOf('+++ ') === 0 && diffstr[i + 2].indexOf('@@') === 0) {
|
3690
|
-
break;
|
3691
|
-
}
|
3692
|
-
|
4121
|
+
removeCount = 0;
|
4122
|
+
for (; i < diffstr.length && (removeCount < hunk.oldLines || addCount < hunk.newLines || (_diffstr$i = diffstr[i]) !== null && _diffstr$i !== void 0 && _diffstr$i.startsWith('\\')); i++) {
|
4123
|
+
var _diffstr$i;
|
3693
4124
|
var operation = diffstr[i].length == 0 && i != diffstr.length - 1 ? ' ' : diffstr[i][0];
|
3694
|
-
|
3695
4125
|
if (operation === '+' || operation === '-' || operation === ' ' || operation === '\\') {
|
3696
4126
|
hunk.lines.push(diffstr[i]);
|
3697
|
-
hunk.linedelimiters.push(delimiters[i] || '\n');
|
3698
|
-
|
3699
4127
|
if (operation === '+') {
|
3700
4128
|
addCount++;
|
3701
4129
|
} else if (operation === '-') {
|
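`lib_parsePatch` is the bundled copy of jsdiff's public `parsePatch`. A small usage sketch against the standalone `diff` package (the patch text below is made up) shows the structured result the parser produces:

```js
const { parsePatch } = require('diff');

const text = [
  'Index: greeting.txt',
  '===================================================================',
  '--- greeting.txt',
  '+++ greeting.txt',
  '@@ -1,2 +1,2 @@',
  ' hello',
  '-world',
  '+there',
].join('\n');

const [index] = parsePatch(text);
console.log(index.oldFileName);       // greeting.txt
console.log(index.hunks[0].oldStart); // 1
console.log(index.hunks[0].lines);    // [' hello', '-world', '+there']
```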
@@ -3705,37 +4133,30 @@ function lib_parsePatch(uniDiff) {
           removeCount++;
         }
       } else {
-
+        throw new Error("Hunk at line ".concat(chunkHeaderIndex + 1, " contained invalid line ").concat(diffstr[i]));
       }
-    }
-
+    }
 
+    // Handle the empty block count case
     if (!addCount && hunk.newLines === 1) {
       hunk.newLines = 0;
     }
-
     if (!removeCount && hunk.oldLines === 1) {
       hunk.oldLines = 0;
-    } // Perform optional sanity checking
-
-
-    if (options.strict) {
-      if (addCount !== hunk.newLines) {
-        throw new Error('Added line count did not match for hunk at line ' + (chunkHeaderIndex + 1));
-      }
-
-      if (removeCount !== hunk.oldLines) {
-        throw new Error('Removed line count did not match for hunk at line ' + (chunkHeaderIndex + 1));
-      }
     }
 
+    // Perform sanity checking
+    if (addCount !== hunk.newLines) {
+      throw new Error('Added line count did not match for hunk at line ' + (chunkHeaderIndex + 1));
+    }
+    if (removeCount !== hunk.oldLines) {
+      throw new Error('Removed line count did not match for hunk at line ' + (chunkHeaderIndex + 1));
+    }
     return hunk;
   }
-
   while (i < diffstr.length) {
     parseIndex();
   }
-
   return list;
 }
 
@@ -3744,210 +4165,275 @@ function lib_parsePatch(uniDiff) {
 // start of 2, this will iterate 2, 3, 1, 4, 0.
 function lib_distanceIterator (start, minLine, maxLine) {
   var wantForward = true,
-
-
-
+    backwardExhausted = false,
+    forwardExhausted = false,
+    localOffset = 1;
   return function iterator() {
     if (wantForward && !forwardExhausted) {
       if (backwardExhausted) {
         localOffset++;
       } else {
         wantForward = false;
-      }
-      // after offset location (or desired location on first iteration)
-
+      }
 
+      // Check if trying to fit beyond text length, and if not, check it fits
+      // after offset location (or desired location on first iteration)
       if (start + localOffset <= maxLine) {
-        return localOffset;
+        return start + localOffset;
       }
-
       forwardExhausted = true;
     }
-
     if (!backwardExhausted) {
       if (!forwardExhausted) {
         wantForward = true;
-      }
-      // before offset location
-
+      }
 
+      // Check if trying to fit before text beginning, and if not, check it fits
+      // before offset location
       if (minLine <= start - localOffset) {
-        return -localOffset++;
+        return start - localOffset++;
       }
-
       backwardExhausted = true;
       return iterator();
-    }
-    // hunk can't fit on the text. Return undefined
+    }
 
+    // We tried to fit hunk before text beginning and beyond text length, then
+    // hunk can't fit on the text. Return undefined
   };
 }
 
|
function lib_applyPatch(source, uniDiff) {
|
3787
4206
|
var options = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {};
|
3788
|
-
|
3789
4207
|
if (typeof uniDiff === 'string') {
|
3790
4208
|
uniDiff = lib_parsePatch(uniDiff);
|
3791
4209
|
}
|
3792
|
-
|
3793
4210
|
if (Array.isArray(uniDiff)) {
|
3794
4211
|
if (uniDiff.length > 1) {
|
3795
4212
|
throw new Error('applyPatch only works with a single input.');
|
3796
4213
|
}
|
3797
|
-
|
3798
4214
|
uniDiff = uniDiff[0];
|
3799
|
-
}
|
3800
|
-
|
3801
|
-
|
3802
|
-
|
3803
|
-
|
3804
|
-
|
3805
|
-
|
3806
|
-
|
3807
|
-
},
|
3808
|
-
errorCount = 0,
|
3809
|
-
fuzzFactor = options.fuzzFactor || 0,
|
3810
|
-
minLine = 0,
|
3811
|
-
offset = 0,
|
3812
|
-
removeEOFNL,
|
3813
|
-
addEOFNL;
|
3814
|
-
/**
|
3815
|
-
* Checks if the hunk exactly fits on the provided location
|
3816
|
-
*/
|
4215
|
+
}
|
4216
|
+
if (options.autoConvertLineEndings || options.autoConvertLineEndings == null) {
|
4217
|
+
if (hasOnlyWinLineEndings(source) && isUnix(uniDiff)) {
|
4218
|
+
uniDiff = unixToWin(uniDiff);
|
4219
|
+
} else if (hasOnlyUnixLineEndings(source) && isWin(uniDiff)) {
|
4220
|
+
uniDiff = winToUnix(uniDiff);
|
4221
|
+
}
|
4222
|
+
}
|
3817
4223
|
|
4224
|
+
// Apply the diff to the input
|
4225
|
+
var lines = source.split('\n'),
|
4226
|
+
hunks = uniDiff.hunks,
|
4227
|
+
compareLine = options.compareLine || function (lineNumber, line, operation, patchContent) {
|
4228
|
+
return line === patchContent;
|
4229
|
+
},
|
4230
|
+
fuzzFactor = options.fuzzFactor || 0,
|
4231
|
+
minLine = 0;
|
4232
|
+
if (fuzzFactor < 0 || !Number.isInteger(fuzzFactor)) {
|
4233
|
+
throw new Error('fuzzFactor must be a non-negative integer');
|
4234
|
+
}
|
3818
4235
|
|
3819
|
-
|
3820
|
-
|
3821
|
-
|
3822
|
-
|
3823
|
-
content = line.length > 0 ? line.substr(1) : line;
|
4236
|
+
// Special case for empty patch.
|
4237
|
+
if (!hunks.length) {
|
4238
|
+
return source;
|
4239
|
+
}
|
3824
4240
|
|
3825
|
-
|
3826
|
-
|
3827
|
-
|
3828
|
-
|
4241
|
+
// Before anything else, handle EOFNL insertion/removal. If the patch tells us to make a change
|
4242
|
+
// to the EOFNL that is redundant/impossible - i.e. to remove a newline that's not there, or add a
|
4243
|
+
// newline that already exists - then we either return false and fail to apply the patch (if
|
4244
|
+
// fuzzFactor is 0) or simply ignore the problem and do nothing (if fuzzFactor is >0).
|
4245
|
+
// If we do need to remove/add a newline at EOF, this will always be in the final hunk:
|
4246
|
+
var prevLine = '',
|
4247
|
+
removeEOFNL = false,
|
4248
|
+
addEOFNL = false;
|
4249
|
+
for (var i = 0; i < hunks[hunks.length - 1].lines.length; i++) {
|
4250
|
+
var line = hunks[hunks.length - 1].lines[i];
|
4251
|
+
if (line[0] == '\\') {
|
4252
|
+
if (prevLine[0] == '+') {
|
4253
|
+
removeEOFNL = true;
|
4254
|
+
} else if (prevLine[0] == '-') {
|
4255
|
+
addEOFNL = true;
|
4256
|
+
}
|
4257
|
+
}
|
4258
|
+
prevLine = line;
|
4259
|
+
}
|
4260
|
+
if (removeEOFNL) {
|
4261
|
+
if (addEOFNL) {
|
4262
|
+
// This means the final line gets changed but doesn't have a trailing newline in either the
|
4263
|
+
// original or patched version. In that case, we do nothing if fuzzFactor > 0, and if
|
4264
|
+
// fuzzFactor is 0, we simply validate that the source file has no trailing newline.
|
4265
|
+
if (!fuzzFactor && lines[lines.length - 1] == '') {
|
4266
|
+
return false;
|
4267
|
+
}
|
4268
|
+
} else if (lines[lines.length - 1] == '') {
|
4269
|
+
lines.pop();
|
4270
|
+
} else if (!fuzzFactor) {
|
4271
|
+
return false;
|
4272
|
+
}
|
4273
|
+
} else if (addEOFNL) {
|
4274
|
+
if (lines[lines.length - 1] != '') {
|
4275
|
+
lines.push('');
|
4276
|
+
} else if (!fuzzFactor) {
|
4277
|
+
return false;
|
4278
|
+
}
|
4279
|
+
}
|
3829
4280
|
|
3830
|
-
|
3831
|
-
|
4281
|
+
/**
|
4282
|
+
* Checks if the hunk can be made to fit at the provided location with at most `maxErrors`
|
4283
|
+
* insertions, substitutions, or deletions, while ensuring also that:
|
4284
|
+
* - lines deleted in the hunk match exactly, and
|
4285
|
+
* - wherever an insertion operation or block of insertion operations appears in the hunk, the
|
4286
|
+
* immediately preceding and following lines of context match exactly
|
4287
|
+
*
|
4288
|
+
* `toPos` should be set such that lines[toPos] is meant to match hunkLines[0].
|
4289
|
+
*
|
4290
|
+
* If the hunk can be applied, returns an object with properties `oldLineLastI` and
|
4291
|
+
* `replacementLines`. Otherwise, returns null.
|
4292
|
+
*/
|
4293
|
+
function applyHunk(hunkLines, toPos, maxErrors) {
|
4294
|
+
var hunkLinesI = arguments.length > 3 && arguments[3] !== undefined ? arguments[3] : 0;
|
4295
|
+
var lastContextLineMatched = arguments.length > 4 && arguments[4] !== undefined ? arguments[4] : true;
|
4296
|
+
var patchedLines = arguments.length > 5 && arguments[5] !== undefined ? arguments[5] : [];
|
4297
|
+
var patchedLinesLength = arguments.length > 6 && arguments[6] !== undefined ? arguments[6] : 0;
|
4298
|
+
var nConsecutiveOldContextLines = 0;
|
4299
|
+
var nextContextLineMustMatch = false;
|
4300
|
+
for (; hunkLinesI < hunkLines.length; hunkLinesI++) {
|
4301
|
+
var hunkLine = hunkLines[hunkLinesI],
|
4302
|
+
operation = hunkLine.length > 0 ? hunkLine[0] : ' ',
|
4303
|
+
content = hunkLine.length > 0 ? hunkLine.substr(1) : hunkLine;
|
4304
|
+
if (operation === '-') {
|
4305
|
+
if (compareLine(toPos + 1, lines[toPos], operation, content)) {
|
4306
|
+
toPos++;
|
4307
|
+
nConsecutiveOldContextLines = 0;
|
4308
|
+
} else {
|
4309
|
+
if (!maxErrors || lines[toPos] == null) {
|
4310
|
+
return null;
|
3832
4311
|
}
|
4312
|
+
patchedLines[patchedLinesLength] = lines[toPos];
|
4313
|
+
return applyHunk(hunkLines, toPos + 1, maxErrors - 1, hunkLinesI, false, patchedLines, patchedLinesLength + 1);
|
3833
4314
|
}
|
4315
|
+
}
|
4316
|
+
if (operation === '+') {
|
4317
|
+
if (!lastContextLineMatched) {
|
4318
|
+
return null;
|
4319
|
+
}
|
4320
|
+
patchedLines[patchedLinesLength] = content;
|
4321
|
+
patchedLinesLength++;
|
4322
|
+
nConsecutiveOldContextLines = 0;
|
4323
|
+
nextContextLineMustMatch = true;
|
4324
|
+
}
|
4325
|
+
if (operation === ' ') {
|
4326
|
+
nConsecutiveOldContextLines++;
|
4327
|
+
patchedLines[patchedLinesLength] = lines[toPos];
|
4328
|
+
if (compareLine(toPos + 1, lines[toPos], operation, content)) {
|
4329
|
+
patchedLinesLength++;
|
4330
|
+
lastContextLineMatched = true;
|
4331
|
+
nextContextLineMustMatch = false;
|
4332
|
+
toPos++;
|
4333
|
+
} else {
|
4334
|
+
if (nextContextLineMustMatch || !maxErrors) {
|
4335
|
+
return null;
|
4336
|
+
}
|
3834
4337
|
|
3835
|
-
|
4338
|
+
// Consider 3 possibilities in sequence:
|
4339
|
+
// 1. lines contains a *substitution* not included in the patch context, or
|
4340
|
+
// 2. lines contains an *insertion* not included in the patch context, or
|
4341
|
+
// 3. lines contains a *deletion* not included in the patch context
|
4342
|
+
// The first two options are of course only possible if the line from lines is non-null -
|
4343
|
+
// i.e. only option 3 is possible if we've overrun the end of the old file.
|
4344
|
+
return lines[toPos] && (applyHunk(hunkLines, toPos + 1, maxErrors - 1, hunkLinesI + 1, false, patchedLines, patchedLinesLength + 1) || applyHunk(hunkLines, toPos + 1, maxErrors - 1, hunkLinesI, false, patchedLines, patchedLinesLength + 1)) || applyHunk(hunkLines, toPos, maxErrors - 1, hunkLinesI + 1, false, patchedLines, patchedLinesLength);
|
4345
|
+
}
|
3836
4346
|
}
|
3837
4347
|
}
|
3838
4348
|
|
3839
|
-
|
3840
|
-
|
3841
|
-
|
3842
|
-
|
3843
|
-
|
3844
|
-
|
3845
|
-
|
3846
|
-
|
3847
|
-
|
3848
|
-
|
4349
|
+
// Before returning, trim any unmodified context lines off the end of patchedLines and reduce
|
4350
|
+
// toPos (and thus oldLineLastI) accordingly. This allows later hunks to be applied to a region
|
4351
|
+
// that starts in this hunk's trailing context.
|
4352
|
+
patchedLinesLength -= nConsecutiveOldContextLines;
|
4353
|
+
toPos -= nConsecutiveOldContextLines;
|
4354
|
+
patchedLines.length = patchedLinesLength;
|
4355
|
+
return {
|
4356
|
+
patchedLines: patchedLines,
|
4357
|
+
oldLineLastI: toPos - 1
|
4358
|
+
};
|
4359
|
+
}
|
4360
|
+
var resultLines = [];
|
3849
4361
|
|
3850
|
-
|
3851
|
-
|
3852
|
-
|
4362
|
+
// Search best fit offsets for each hunk based on the previous ones
|
4363
|
+
var prevHunkOffset = 0;
|
4364
|
+
for (var _i = 0; _i < hunks.length; _i++) {
|
4365
|
+
var hunk = hunks[_i];
|
4366
|
+
var hunkResult = void 0;
|
4367
|
+
var maxLine = lines.length - hunk.oldLines + fuzzFactor;
|
4368
|
+
var toPos = void 0;
|
4369
|
+
for (var maxErrors = 0; maxErrors <= fuzzFactor; maxErrors++) {
|
4370
|
+
toPos = hunk.oldStart + prevHunkOffset - 1;
|
4371
|
+
var iterator = lib_distanceIterator(toPos, minLine, maxLine);
|
4372
|
+
for (; toPos !== undefined; toPos = iterator()) {
|
4373
|
+
hunkResult = applyHunk(hunk.lines, toPos, maxErrors);
|
4374
|
+
if (hunkResult) {
|
4375
|
+
break;
|
4376
|
+
}
|
4377
|
+
}
|
4378
|
+
if (hunkResult) {
|
3853
4379
|
break;
|
3854
4380
|
}
|
3855
4381
|
}
|
3856
|
-
|
3857
|
-
if (localOffset === undefined) {
|
4382
|
+
if (!hunkResult) {
|
3858
4383
|
return false;
|
3859
|
-
}
|
3860
|
-
// to fit over already patched text
|
3861
|
-
|
3862
|
-
|
3863
|
-
minLine = hunk.offset + hunk.oldStart + hunk.oldLines;
|
3864
|
-
} // Apply patch hunks
|
3865
|
-
|
3866
|
-
|
3867
|
-
var diffOffset = 0;
|
3868
|
-
|
3869
|
-
for (var _i = 0; _i < hunks.length; _i++) {
|
3870
|
-
var _hunk = hunks[_i],
|
3871
|
-
_toPos = _hunk.oldStart + _hunk.offset + diffOffset - 1;
|
3872
|
-
|
3873
|
-
diffOffset += _hunk.newLines - _hunk.oldLines;
|
3874
|
-
|
3875
|
-
for (var j = 0; j < _hunk.lines.length; j++) {
|
3876
|
-
var line = _hunk.lines[j],
|
3877
|
-
operation = line.length > 0 ? line[0] : ' ',
|
3878
|
-
content = line.length > 0 ? line.substr(1) : line,
|
3879
|
-
delimiter = _hunk.linedelimiters[j];
|
4384
|
+
}
|
3880
4385
|
|
3881
|
-
|
3882
|
-
|
3883
|
-
|
3884
|
-
|
3885
|
-
delimiters.splice(_toPos, 1);
|
3886
|
-
/* istanbul ignore else */
|
3887
|
-
} else if (operation === '+') {
|
3888
|
-
lines.splice(_toPos, 0, content);
|
3889
|
-
delimiters.splice(_toPos, 0, delimiter);
|
3890
|
-
_toPos++;
|
3891
|
-
} else if (operation === '\\') {
|
3892
|
-
var previousOperation = _hunk.lines[j - 1] ? _hunk.lines[j - 1][0] : null;
|
4386
|
+
// Copy everything from the end of where we applied the last hunk to the start of this hunk
|
4387
|
+
for (var _i2 = minLine; _i2 < toPos; _i2++) {
|
4388
|
+
resultLines.push(lines[_i2]);
|
4389
|
+
}
|
3893
4390
|
|
3894
|
-
|
3895
|
-
|
3896
|
-
|
3897
|
-
|
3898
|
-
}
|
3899
|
-
}
|
4391
|
+
// Add the lines produced by applying the hunk:
|
4392
|
+
for (var _i3 = 0; _i3 < hunkResult.patchedLines.length; _i3++) {
|
4393
|
+
var _line = hunkResult.patchedLines[_i3];
|
4394
|
+
resultLines.push(_line);
|
3900
4395
|
}
|
3901
|
-
} // Handle EOFNL insertion/removal
|
3902
4396
|
|
4397
|
+
// Set lower text limit to end of the current hunk, so next ones don't try
|
4398
|
+
// to fit over already patched text
|
4399
|
+
minLine = hunkResult.oldLineLastI + 1;
|
3903
4400
|
|
3904
|
-
|
3905
|
-
|
3906
|
-
|
3907
|
-
delimiters.pop();
|
3908
|
-
}
|
3909
|
-
} else if (addEOFNL) {
|
3910
|
-
lines.push('');
|
3911
|
-
delimiters.push('\n');
|
4401
|
+
// Note the offset between where the patch said the hunk should've applied and where we
|
4402
|
+
// applied it, so we can adjust future hunks accordingly:
|
4403
|
+
prevHunkOffset = toPos + 1 - hunk.oldStart;
|
3912
4404
|
}
|
3913
4405
|
|
3914
|
-
|
3915
|
-
|
4406
|
+
// Copy over the rest of the lines from the old text
|
4407
|
+
for (var _i4 = minLine; _i4 < lines.length; _i4++) {
|
4408
|
+
resultLines.push(lines[_i4]);
|
3916
4409
|
}
|
4410
|
+
return resultLines.join('\n');
|
4411
|
+
}
|
3917
4412
|
|
3918
|
-
|
3919
|
-
} // Wrapper that supports multiple file patches via callbacks.
|
3920
|
-
|
4413
|
+
// Wrapper that supports multiple file patches via callbacks.
|
3921
4414
|
function lib_applyPatches(uniDiff, options) {
|
3922
4415
|
if (typeof uniDiff === 'string') {
|
3923
4416
|
uniDiff = lib_parsePatch(uniDiff);
|
3924
4417
|
}
|
3925
|
-
|
3926
4418
|
var currentIndex = 0;
|
3927
|
-
|
3928
4419
|
function processIndex() {
|
3929
4420
|
var index = uniDiff[currentIndex++];
|
3930
|
-
|
3931
4421
|
if (!index) {
|
3932
4422
|
return options.complete();
|
3933
4423
|
}
|
3934
|
-
|
3935
4424
|
options.loadFile(index, function (err, data) {
|
3936
4425
|
if (err) {
|
3937
4426
|
return options.complete(err);
|
3938
4427
|
}
|
3939
|
-
|
3940
4428
|
var updatedContent = lib_applyPatch(data, index, options);
|
3941
4429
|
options.patched(index, updatedContent, function (err) {
|
3942
4430
|
if (err) {
|
3943
4431
|
return options.complete(err);
|
3944
4432
|
}
|
3945
|
-
|
3946
4433
|
processIndex();
|
3947
4434
|
});
|
3948
4435
|
});
|
3949
4436
|
}
|
3950
|
-
|
3951
4437
|
processIndex();
|
3952
4438
|
}
|
3953
4439
|
|
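The rewritten `lib_applyPatch` above corresponds to jsdiff's public `applyPatch`, whose `fuzzFactor` option now bounds the number of tolerated context errors per hunk instead of silently skipping checks. A usage sketch against the standalone `diff` package (the file contents are made up):

```js
const { createPatch, applyPatch } = require('diff');

const patch = createPatch('notes.txt',
  'alpha\nbeta\ngamma\ndelta\n',
  'alpha\nbeta\nGAMMA\ndelta\n');

// The target drifted on a context line ("alpha" became "alpha!"), so an exact
// apply fails, while fuzzFactor: 1 tolerates the single mismatched context line.
const drifted = 'alpha!\nbeta\ngamma\ndelta\n';
console.log(applyPatch(drifted, patch));                    // false
console.log(applyPatch(drifted, patch, { fuzzFactor: 1 })); // "alpha!\nbeta\nGAMMA\ndelta\n"
```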
@@ -3955,202 +4441,238 @@ function lib_structuredPatch(oldFileName, newFileName, oldStr, newStr, oldHeader
|
|
3955
4441
|
if (!options) {
|
3956
4442
|
options = {};
|
3957
4443
|
}
|
3958
|
-
|
4444
|
+
if (typeof options === 'function') {
|
4445
|
+
options = {
|
4446
|
+
callback: options
|
4447
|
+
};
|
4448
|
+
}
|
3959
4449
|
if (typeof options.context === 'undefined') {
|
3960
4450
|
options.context = 4;
|
3961
4451
|
}
|
3962
|
-
|
3963
|
-
|
3964
|
-
|
3965
|
-
if (!
|
3966
|
-
return;
|
4452
|
+
if (options.newlineIsToken) {
|
4453
|
+
throw new Error('newlineIsToken may not be used with patch-generation functions, only with diffing functions');
|
4454
|
+
}
|
4455
|
+
if (!options.callback) {
|
4456
|
+
return diffLinesResultToPatch(lib_diffLines(oldStr, newStr, options));
|
4457
|
+
} else {
|
4458
|
+
var _options = options,
|
4459
|
+
_callback = _options.callback;
|
4460
|
+
lib_diffLines(oldStr, newStr, _objectSpread2(_objectSpread2({}, options), {}, {
|
4461
|
+
callback: function callback(diff) {
|
4462
|
+
var patch = diffLinesResultToPatch(diff);
|
4463
|
+
_callback(patch);
|
4464
|
+
}
|
4465
|
+
}));
|
3967
4466
|
}
|
4467
|
+
function diffLinesResultToPatch(diff) {
|
4468
|
+
// STEP 1: Build up the patch with no "" lines and with the arrays
|
4469
|
+
// of lines containing trailing newline characters. We'll tidy up later...
|
3968
4470
|
|
3969
|
-
|
3970
|
-
|
3971
|
-
|
3972
|
-
|
3973
|
-
|
3974
|
-
|
3975
|
-
|
3976
|
-
return ' ' + entry;
|
3977
|
-
});
|
3978
|
-
}
|
4471
|
+
if (!diff) {
|
4472
|
+
return;
|
4473
|
+
}
|
4474
|
+
diff.push({
|
4475
|
+
value: '',
|
4476
|
+
lines: []
|
4477
|
+
}); // Append an empty value to make cleanup easier
|
3979
4478
|
|
3980
|
-
|
3981
|
-
|
4479
|
+
function contextLines(lines) {
|
4480
|
+
return lines.map(function (entry) {
|
4481
|
+
return ' ' + entry;
|
4482
|
+
});
|
4483
|
+
}
|
4484
|
+
var hunks = [];
|
4485
|
+
var oldRangeStart = 0,
|
3982
4486
|
newRangeStart = 0,
|
3983
4487
|
curRange = [],
|
3984
4488
|
oldLine = 1,
|
3985
4489
|
newLine = 1;
|
3986
|
-
|
3987
|
-
|
3988
|
-
|
3989
|
-
|
3990
|
-
|
3991
|
-
|
3992
|
-
|
3993
|
-
|
3994
|
-
|
3995
|
-
|
3996
|
-
|
3997
|
-
|
3998
|
-
|
3999
|
-
|
4000
|
-
|
4001
|
-
|
4002
|
-
curRange = options.context > 0 ? contextLines(prev.lines.slice(-options.context)) : [];
|
4003
|
-
oldRangeStart -= curRange.length;
|
4004
|
-
newRangeStart -= curRange.length;
|
4490
|
+
var _loop = function _loop() {
|
4491
|
+
var current = diff[i],
|
4492
|
+
lines = current.lines || splitLines(current.value);
|
4493
|
+
current.lines = lines;
|
4494
|
+
if (current.added || current.removed) {
|
4495
|
+
var _curRange;
|
4496
|
+
// If we have previous context, start with that
|
4497
|
+
if (!oldRangeStart) {
|
4498
|
+
var prev = diff[i - 1];
|
4499
|
+
oldRangeStart = oldLine;
|
4500
|
+
newRangeStart = newLine;
|
4501
|
+
if (prev) {
|
4502
|
+
curRange = options.context > 0 ? contextLines(prev.lines.slice(-options.context)) : [];
|
4503
|
+
oldRangeStart -= curRange.length;
|
4504
|
+
newRangeStart -= curRange.length;
|
4505
|
+
}
|
4005
4506
|
}
|
4006
|
-
} // Output our changes
|
4007
|
-
|
4008
|
-
|
4009
|
-
(_curRange = curRange).push.apply(_curRange, lib_toConsumableArray(lines.map(function (entry) {
|
4010
|
-
return (current.added ? '+' : '-') + entry;
|
4011
|
-
}))); // Track the updated file position
|
4012
4507
|
|
4508
|
+
// Output our changes
|
4509
|
+
(_curRange = curRange).push.apply(_curRange, lib_toConsumableArray(lines.map(function (entry) {
|
4510
|
+
return (current.added ? '+' : '-') + entry;
|
4511
|
+
})));
|
4013
4512
|
|
4014
|
-
|
4015
|
-
|
4513
|
+
// Track the updated file position
|
4514
|
+
if (current.added) {
|
4515
|
+
newLine += lines.length;
|
4516
|
+
} else {
|
4517
|
+
oldLine += lines.length;
|
4518
|
+
}
|
4016
4519
|
} else {
|
4520
|
+
// Identical context lines. Track line changes
|
4521
|
+
if (oldRangeStart) {
|
4522
|
+
// Close out any changes that have been output (or join overlapping)
|
4523
|
+
if (lines.length <= options.context * 2 && i < diff.length - 2) {
|
4524
|
+
var _curRange2;
|
4525
|
+
// Overlapping
|
4526
|
+
(_curRange2 = curRange).push.apply(_curRange2, lib_toConsumableArray(contextLines(lines)));
|
4527
|
+
} else {
|
4528
|
+
var _curRange3;
|
4529
|
+
// end the range and output
|
4530
|
+
var contextSize = Math.min(lines.length, options.context);
|
4531
|
+
(_curRange3 = curRange).push.apply(_curRange3, lib_toConsumableArray(contextLines(lines.slice(0, contextSize))));
|
4532
|
+
var _hunk = {
|
4533
|
+
oldStart: oldRangeStart,
|
4534
|
+
oldLines: oldLine - oldRangeStart + contextSize,
|
4535
|
+
newStart: newRangeStart,
|
4536
|
+
newLines: newLine - newRangeStart + contextSize,
|
4537
|
+
lines: curRange
|
4538
|
+
};
|
4539
|
+
hunks.push(_hunk);
|
4540
|
+
oldRangeStart = 0;
|
4541
|
+
newRangeStart = 0;
|
4542
|
+
curRange = [];
|
4543
|
+
}
|
4544
|
+
}
|
4017
4545
|
oldLine += lines.length;
|
4546
|
+
newLine += lines.length;
|
4018
4547
|
}
|
4019
|
-
}
|
4020
|
-
|
4021
|
-
|
4022
|
-
|
4023
|
-
if (lines.length <= options.context * 2 && i < diff.length - 2) {
|
4024
|
-
var _curRange2;
|
4548
|
+
};
|
4549
|
+
for (var i = 0; i < diff.length; i++) {
|
4550
|
+
_loop();
|
4551
|
+
}
|
4025
4552
|
|
4026
|
-
|
4027
|
-
|
4553
|
+
// Step 2: eliminate the trailing `\n` from each line of each hunk, and, where needed, add
|
4554
|
+
// "".
|
4555
|
+
for (var _i = 0, _hunks = hunks; _i < _hunks.length; _i++) {
|
4556
|
+
var hunk = _hunks[_i];
|
4557
|
+
for (var _i2 = 0; _i2 < hunk.lines.length; _i2++) {
|
4558
|
+
if (hunk.lines[_i2].endsWith('\n')) {
|
4559
|
+
hunk.lines[_i2] = hunk.lines[_i2].slice(0, -1);
|
4028
4560
|
} else {
|
4029
|
-
|
4030
|
-
|
4031
|
-
// end the range and output
|
4032
|
-
var contextSize = Math.min(lines.length, options.context);
|
4033
|
-
|
4034
|
-
(_curRange3 = curRange).push.apply(_curRange3, lib_toConsumableArray(contextLines(lines.slice(0, contextSize))));
|
4035
|
-
|
4036
|
-
var hunk = {
|
4037
|
-
oldStart: oldRangeStart,
|
4038
|
-
oldLines: oldLine - oldRangeStart + contextSize,
|
4039
|
-
newStart: newRangeStart,
|
4040
|
-
newLines: newLine - newRangeStart + contextSize,
|
4041
|
-
lines: curRange
|
4042
|
-
};
|
4043
|
-
|
4044
|
-
if (i >= diff.length - 2 && lines.length <= options.context) {
|
4045
|
-
// EOF is inside this hunk
|
4046
|
-
var oldEOFNewline = /\n$/.test(oldStr);
|
4047
|
-
var newEOFNewline = /\n$/.test(newStr);
|
4048
|
-
var noNlBeforeAdds = lines.length == 0 && curRange.length > hunk.oldLines;
|
4049
|
-
|
4050
|
-
if (!oldEOFNewline && noNlBeforeAdds && oldStr.length > 0) {
|
4051
|
-
// special case: old has no eol and no trailing context; no-nl can end up before adds
|
4052
|
-
// however, if the old file is empty, do not output the no-nl line
|
4053
|
-
curRange.splice(hunk.oldLines, 0, '\');
|
4054
|
-
}
|
4055
|
-
|
4056
|
-
if (!oldEOFNewline && !noNlBeforeAdds || !newEOFNewline) {
|
4057
|
-
curRange.push('\');
|
4058
|
-
}
|
4059
|
-
}
|
4060
|
-
|
4061
|
-
hunks.push(hunk);
|
4062
|
-
oldRangeStart = 0;
|
4063
|
-
newRangeStart = 0;
|
4064
|
-
curRange = [];
|
4561
|
+
hunk.lines.splice(_i2 + 1, 0, '\');
|
4562
|
+
_i2++; // Skip the line we just added, then continue iterating
|
4065
4563
|
}
|
4066
4564
|
}
|
4067
|
-
|
4068
|
-
oldLine += lines.length;
|
4069
|
-
newLine += lines.length;
|
4070
4565
|
}
|
4071
|
-
|
4072
|
-
|
4073
|
-
|
4074
|
-
|
4566
|
+
return {
|
4567
|
+
oldFileName: oldFileName,
|
4568
|
+
newFileName: newFileName,
|
4569
|
+
oldHeader: oldHeader,
|
4570
|
+
newHeader: newHeader,
|
4571
|
+
hunks: hunks
|
4572
|
+
};
|
4075
4573
|
}
|
4076
|
-
|
4077
|
-
return {
|
4078
|
-
oldFileName: oldFileName,
|
4079
|
-
newFileName: newFileName,
|
4080
|
-
oldHeader: oldHeader,
|
4081
|
-
newHeader: newHeader,
|
4082
|
-
hunks: hunks
|
4083
|
-
};
|
4084
4574
|
}
|
 function lib_formatPatch(diff) {
+  if (Array.isArray(diff)) {
+    return diff.map(lib_formatPatch).join('\n');
+  }
   var ret = [];
-
   if (diff.oldFileName == diff.newFileName) {
     ret.push('Index: ' + diff.oldFileName);
   }
-
   ret.push('===================================================================');
   ret.push('--- ' + diff.oldFileName + (typeof diff.oldHeader === 'undefined' ? '' : '\t' + diff.oldHeader));
   ret.push('+++ ' + diff.newFileName + (typeof diff.newHeader === 'undefined' ? '' : '\t' + diff.newHeader));
-
   for (var i = 0; i < diff.hunks.length; i++) {
-    var hunk = diff.hunks[i];
+    var hunk = diff.hunks[i];
+    // Unified Diff Format quirk: If the chunk size is 0,
     // the first number is one lower than one would expect.
     // https://www.artima.com/weblogs/viewpost.jsp?thread=164293
-
     if (hunk.oldLines === 0) {
       hunk.oldStart -= 1;
     }
-
     if (hunk.newLines === 0) {
       hunk.newStart -= 1;
     }
-
     ret.push('@@ -' + hunk.oldStart + ',' + hunk.oldLines + ' +' + hunk.newStart + ',' + hunk.newLines + ' @@');
     ret.push.apply(ret, hunk.lines);
   }
-
   return ret.join('\n') + '\n';
 }
 function lib_createTwoFilesPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options) {
-
+  var _options2;
+  if (typeof options === 'function') {
+    options = {
+      callback: options
+    };
+  }
+  if (!((_options2 = options) !== null && _options2 !== void 0 && _options2.callback)) {
+    var patchObj = lib_structuredPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options);
+    if (!patchObj) {
+      return;
+    }
+    return lib_formatPatch(patchObj);
+  } else {
+    var _options3 = options,
+      _callback2 = _options3.callback;
+    lib_structuredPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, _objectSpread2(_objectSpread2({}, options), {}, {
+      callback: function callback(patchObj) {
+        if (!patchObj) {
+          _callback2();
+        } else {
+          _callback2(lib_formatPatch(patchObj));
+        }
+      }
+    }));
+  }
 }
 function lib_createPatch(fileName, oldStr, newStr, oldHeader, newHeader, options) {
   return lib_createTwoFilesPatch(fileName, fileName, oldStr, newStr, oldHeader, newHeader, options);
 }
 
+/**
+ * Split `text` into an array of lines, including the trailing newline character (where present)
+ */
+function splitLines(text) {
+  var hasTrailingNl = text.endsWith('\n');
+  var result = text.split('\n').map(function (line) {
+    return line + '\n';
+  });
+  if (hasTrailingNl) {
+    result.pop();
+  } else {
+    result.push(result.pop().slice(0, -1));
+  }
+  return result;
+}
+
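`lib_structuredPatch` and `lib_formatPatch` mirror jsdiff's public `structuredPatch` and `formatPatch`; with the new `splitLines` helper, hunk lines no longer carry their trailing newline characters. A small sketch against the standalone package (sample strings are made up):

```js
const { structuredPatch, formatPatch } = require('diff');

const patchObj = structuredPatch('a.txt', 'b.txt', 'one\ntwo\n', 'one\n2\n', '', '');
console.log(patchObj.hunks[0].lines); // [ ' one', '-two', '+2' ]
console.log(formatPatch(patchObj));   // unified-diff text, the same pipeline createTwoFilesPatch uses
```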
 function lib_arrayEqual(a, b) {
   if (a.length !== b.length) {
     return false;
   }
-
   return lib_arrayStartsWith(a, b);
 }
 function lib_arrayStartsWith(array, start) {
   if (start.length > array.length) {
     return false;
   }
-
   for (var i = 0; i < start.length; i++) {
     if (start[i] !== array[i]) {
       return false;
     }
   }
-
   return true;
 }
 
 function lib_calcLineCount(hunk) {
   var _calcOldNewLineCount = lib_calcOldNewLineCount(hunk.lines),
-
-
-
+    oldLines = _calcOldNewLineCount.oldLines,
+    newLines = _calcOldNewLineCount.newLines;
   if (oldLines !== undefined) {
     hunk.oldLines = oldLines;
   } else {
     delete hunk.oldLines;
   }
-
   if (newLines !== undefined) {
     hunk.newLines = newLines;
   } else {
@@ -4160,14 +4682,14 @@ function lib_calcLineCount(hunk) {
|
|
4160
4682
|
function lib_merge(mine, theirs, base) {
|
4161
4683
|
mine = lib_loadPatch(mine, base);
|
4162
4684
|
theirs = lib_loadPatch(theirs, base);
|
4163
|
-
var ret = {};
|
4685
|
+
var ret = {};
|
4686
|
+
|
4687
|
+
// For index we just let it pass through as it doesn't have any necessary meaning.
|
4164
4688
|
// Leaving sanity checks on this to the API consumer that may know more about the
|
4165
4689
|
// meaning in their own context.
|
4166
|
-
|
4167
4690
|
if (mine.index || theirs.index) {
|
4168
4691
|
ret.index = mine.index || theirs.index;
|
4169
4692
|
}
|
4170
|
-
|
4171
4693
|
if (mine.newFileName || theirs.newFileName) {
|
4172
4694
|
if (!lib_fileNameChanged(mine)) {
|
4173
4695
|
// No header or no change in ours, use theirs (and ours if theirs does not exist)
|
@@ -4189,21 +4711,18 @@ function lib_merge(mine, theirs, base) {
|
|
4189
4711
|
ret.newHeader = lib_selectField(ret, mine.newHeader, theirs.newHeader);
|
4190
4712
|
}
|
4191
4713
|
}
|
4192
|
-
|
4193
4714
|
ret.hunks = [];
|
4194
4715
|
var mineIndex = 0,
|
4195
|
-
|
4196
|
-
|
4197
|
-
|
4198
|
-
|
4716
|
+
theirsIndex = 0,
|
4717
|
+
mineOffset = 0,
|
4718
|
+
theirsOffset = 0;
|
4199
4719
|
while (mineIndex < mine.hunks.length || theirsIndex < theirs.hunks.length) {
|
4200
4720
|
var mineCurrent = mine.hunks[mineIndex] || {
|
4201
|
-
|
4202
|
-
|
4203
|
-
|
4204
|
-
|
4205
|
-
|
4206
|
-
|
4721
|
+
oldStart: Infinity
|
4722
|
+
},
|
4723
|
+
theirsCurrent = theirs.hunks[theirsIndex] || {
|
4724
|
+
oldStart: Infinity
|
4725
|
+
};
|
4207
4726
|
if (lib_hunkBefore(mineCurrent, theirsCurrent)) {
|
4208
4727
|
// This patch does not overlap with any of the others, yay.
|
4209
4728
|
ret.hunks.push(lib_cloneHunk(mineCurrent, mineOffset));
|
@@ -4229,30 +4748,23 @@ function lib_merge(mine, theirs, base) {
|
|
4229
4748
|
ret.hunks.push(mergedHunk);
|
4230
4749
|
}
|
4231
4750
|
}
|
4232
|
-
|
4233
4751
|
return ret;
|
4234
4752
|
}
|
4235
|
-
|
4236
4753
|
function lib_loadPatch(param, base) {
|
4237
4754
|
if (typeof param === 'string') {
|
4238
4755
|
if (/^@@/m.test(param) || /^Index:/m.test(param)) {
|
4239
4756
|
return lib_parsePatch(param)[0];
|
4240
4757
|
}
|
4241
|
-
|
4242
4758
|
if (!base) {
|
4243
4759
|
throw new Error('Must provide a base reference or pass in a patch');
|
4244
4760
|
}
|
4245
|
-
|
4246
4761
|
return lib_structuredPatch(undefined, undefined, base, param);
|
4247
4762
|
}
|
4248
|
-
|
4249
4763
|
return param;
|
4250
4764
|
}
|
4251
|
-
|
4252
4765
|
function lib_fileNameChanged(patch) {
|
4253
4766
|
return patch.newFileName && patch.newFileName !== patch.oldFileName;
|
4254
4767
|
}
|
4255
|
-
|
4256
4768
|
function lib_selectField(index, mine, theirs) {
|
4257
4769
|
if (mine === theirs) {
|
4258
4770
|
return mine;
|
@@ -4264,11 +4776,9 @@ function lib_selectField(index, mine, theirs) {
|
|
4264
4776
|
};
|
4265
4777
|
}
|
4266
4778
|
}
|
4267
|
-
|
4268
4779
|
function lib_hunkBefore(test, check) {
|
4269
4780
|
return test.oldStart < check.oldStart && test.oldStart + test.oldLines < check.oldStart;
|
4270
4781
|
}
|
4271
|
-
|
4272
4782
|
function lib_cloneHunk(hunk, offset) {
|
4273
4783
|
return {
|
4274
4784
|
oldStart: hunk.oldStart,
|
@@ -4278,39 +4788,37 @@ function lib_cloneHunk(hunk, offset) {
|
|
4278
4788
|
lines: hunk.lines
|
4279
4789
|
};
|
4280
4790
|
}
|
4281
|
-
|
4282
4791
|
function lib_mergeLines(hunk, mineOffset, mineLines, theirOffset, theirLines) {
|
4283
4792
|
// This will generally result in a conflicted hunk, but there are cases where the context
|
4284
4793
|
// is the only overlap where we can successfully merge the content here.
|
4285
4794
|
var mine = {
|
4286
|
-
|
4287
|
-
|
4288
|
-
|
4289
|
-
|
4290
|
-
|
4291
|
-
|
4292
|
-
|
4293
|
-
|
4294
|
-
|
4795
|
+
offset: mineOffset,
|
4796
|
+
lines: mineLines,
|
4797
|
+
index: 0
|
4798
|
+
},
|
4799
|
+
their = {
|
4800
|
+
offset: theirOffset,
|
4801
|
+
lines: theirLines,
|
4802
|
+
index: 0
|
4803
|
+
};
|
4295
4804
|
|
4805
|
+
// Handle any leading content
|
4296
4806
|
lib_insertLeading(hunk, mine, their);
|
4297
|
-
lib_insertLeading(hunk, their, mine);
|
4807
|
+
lib_insertLeading(hunk, their, mine);
|
4298
4808
|
|
4809
|
+
// Now in the overlap content. Scan through and select the best changes from each.
|
4299
4810
|
while (mine.index < mine.lines.length && their.index < their.lines.length) {
|
4300
4811
|
var mineCurrent = mine.lines[mine.index],
|
4301
|
-
|
4302
|
-
|
4812
|
+
theirCurrent = their.lines[their.index];
|
4303
4813
|
if ((mineCurrent[0] === '-' || mineCurrent[0] === '+') && (theirCurrent[0] === '-' || theirCurrent[0] === '+')) {
|
4304
4814
|
// Both modified ...
|
4305
4815
|
lib_mutualChange(hunk, mine, their);
|
4306
4816
|
} else if (mineCurrent[0] === '+' && theirCurrent[0] === ' ') {
|
4307
4817
|
var _hunk$lines;
|
4308
|
-
|
4309
4818
|
// Mine inserted
|
4310
4819
|
(_hunk$lines = hunk.lines).push.apply(_hunk$lines, lib_toConsumableArray(lib_collectChange(mine)));
|
4311
4820
|
} else if (theirCurrent[0] === '+' && mineCurrent[0] === ' ') {
|
4312
4821
|
var _hunk$lines2;
|
4313
|
-
|
4314
4822
|
// Theirs inserted
|
4315
4823
|
(_hunk$lines2 = hunk.lines).push.apply(_hunk$lines2, lib_toConsumableArray(lib_collectChange(their)));
|
4316
4824
|
} else if (mineCurrent[0] === '-' && theirCurrent[0] === ' ') {
|
@@ -4328,57 +4836,44 @@ function lib_mergeLines(hunk, mineOffset, mineLines, theirOffset, theirLines) {
|
|
4328
4836
|
// Context mismatch
|
4329
4837
|
lib_conflict(hunk, lib_collectChange(mine), lib_collectChange(their));
|
4330
4838
|
}
|
4331
|
-
}
|
4332
|
-
|
4839
|
+
}
|
4333
4840
|
|
4841
|
+
// Now push anything that may be remaining
|
4334
4842
|
lib_insertTrailing(hunk, mine);
|
4335
4843
|
lib_insertTrailing(hunk, their);
|
4336
4844
|
lib_calcLineCount(hunk);
|
4337
4845
|
}
|
4338
|
-
|
4339
4846
|
function lib_mutualChange(hunk, mine, their) {
|
4340
4847
|
var myChanges = lib_collectChange(mine),
|
4341
|
-
|
4342
|
-
|
4848
|
+
theirChanges = lib_collectChange(their);
|
4343
4849
|
if (lib_allRemoves(myChanges) && lib_allRemoves(theirChanges)) {
|
4344
4850
|
// Special case for remove changes that are supersets of one another
|
4345
4851
|
if (lib_arrayStartsWith(myChanges, theirChanges) && lib_skipRemoveSuperset(their, myChanges, myChanges.length - theirChanges.length)) {
|
4346
4852
|
var _hunk$lines3;
|
4347
|
-
|
4348
4853
|
(_hunk$lines3 = hunk.lines).push.apply(_hunk$lines3, lib_toConsumableArray(myChanges));
|
4349
|
-
|
4350
4854
|
return;
|
4351
4855
|
} else if (lib_arrayStartsWith(theirChanges, myChanges) && lib_skipRemoveSuperset(mine, theirChanges, theirChanges.length - myChanges.length)) {
|
4352
4856
|
var _hunk$lines4;
|
4353
|
-
|
4354
4857
|
(_hunk$lines4 = hunk.lines).push.apply(_hunk$lines4, lib_toConsumableArray(theirChanges));
|
4355
|
-
|
4356
4858
|
return;
|
4357
4859
|
}
|
4358
4860
|
} else if (lib_arrayEqual(myChanges, theirChanges)) {
|
4359
4861
|
var _hunk$lines5;
|
4360
|
-
|
4361
4862
|
(_hunk$lines5 = hunk.lines).push.apply(_hunk$lines5, lib_toConsumableArray(myChanges));
|
4362
|
-
|
4363
4863
|
return;
|
4364
4864
|
}
|
4365
|
-
|
4366
4865
|
lib_conflict(hunk, myChanges, theirChanges);
|
4367
4866
|
}
|
4368
|
-
|
4369
4867
|
function lib_removal(hunk, mine, their, swap) {
|
4370
4868
|
var myChanges = lib_collectChange(mine),
|
4371
|
-
|
4372
|
-
|
4869
|
+
theirChanges = lib_collectContext(their, myChanges);
|
4373
4870
|
if (theirChanges.merged) {
|
4374
4871
|
var _hunk$lines6;
|
4375
|
-
|
4376
4872
|
(_hunk$lines6 = hunk.lines).push.apply(_hunk$lines6, lib_toConsumableArray(theirChanges.merged));
|
4377
4873
|
} else {
|
4378
4874
|
lib_conflict(hunk, swap ? theirChanges : myChanges, swap ? myChanges : theirChanges);
|
4379
4875
|
}
|
4380
4876
|
}
|
4381
|
-
|
4382
4877
|
function lib_conflict(hunk, mine, their) {
|
4383
4878
|
hunk.conflict = true;
|
4384
4879
|
hunk.lines.push({
|
@@ -4387,7 +4882,6 @@ function lib_conflict(hunk, mine, their) {
|
|
4387
4882
|
theirs: their
|
4388
4883
|
});
|
4389
4884
|
}
|
4390
|
-
|
4391
4885
|
function lib_insertLeading(hunk, insert, their) {
|
4392
4886
|
while (insert.offset < their.offset && insert.index < insert.lines.length) {
|
4393
4887
|
var line = insert.lines[insert.index++];
|
@@ -4395,25 +4889,22 @@ function lib_insertLeading(hunk, insert, their) {
|
|
4395
4889
|
insert.offset++;
|
4396
4890
|
}
|
4397
4891
|
}
|
4398
|
-
|
4399
4892
|
function lib_insertTrailing(hunk, insert) {
|
4400
4893
|
while (insert.index < insert.lines.length) {
|
4401
4894
|
var line = insert.lines[insert.index++];
|
4402
4895
|
hunk.lines.push(line);
|
4403
4896
|
}
|
4404
4897
|
}
|
4405
|
-
|
4406
4898
|
function lib_collectChange(state) {
|
4407
4899
|
var ret = [],
|
4408
|
-
|
4409
|
-
|
4900
|
+
operation = state.lines[state.index][0];
|
4410
4901
|
while (state.index < state.lines.length) {
|
4411
|
-
var line = state.lines[state.index];
|
4902
|
+
var line = state.lines[state.index];
|
4412
4903
|
|
4904
|
+
// Group additions that are immediately after subtractions and treat them as one "atomic" modify change.
|
4413
4905
|
if (operation === '-' && line[0] === '+') {
|
4414
4906
|
operation = '+';
|
4415
4907
|
}
|
4416
|
-
|
4417
4908
|
if (operation === line[0]) {
|
4418
4909
|
ret.push(line);
|
4419
4910
|
state.index++;
|
@@ -4421,39 +4912,35 @@ function lib_collectChange(state) {
|
|
4421
4912
|
break;
|
4422
4913
|
}
|
4423
4914
|
}
|
4424
|
-
|
4425
4915
|
return ret;
|
4426
4916
|
}
|
4427
|
-
|
4428
4917
|
function lib_collectContext(state, matchChanges) {
|
4429
4918
|
var changes = [],
|
4430
|
-
|
4431
|
-
|
4432
|
-
|
4433
|
-
|
4434
|
-
|
4919
|
+
merged = [],
|
4920
|
+
matchIndex = 0,
|
4921
|
+
contextChanges = false,
|
4922
|
+
conflicted = false;
|
4435
4923
|
while (matchIndex < matchChanges.length && state.index < state.lines.length) {
|
4436
4924
|
var change = state.lines[state.index],
|
4437
|
-
|
4925
|
+
match = matchChanges[matchIndex];
|
4438
4926
|
|
4927
|
+
// Once we've hit our add, then we are done
|
4439
4928
|
if (match[0] === '+') {
|
4440
4929
|
break;
|
4441
4930
|
}
|
4442
|
-
|
4443
4931
|
contextChanges = contextChanges || change[0] !== ' ';
|
4444
4932
|
merged.push(match);
|
4445
|
-
matchIndex++;
|
4446
|
-
// to pull in the remaining context after this
|
4933
|
+
matchIndex++;
|
4447
4934
|
|
4935
|
+
// Consume any additions in the other block as a conflict to attempt
|
4936
|
+
// to pull in the remaining context after this
|
4448
4937
|
if (change[0] === '+') {
|
4449
4938
|
conflicted = true;
|
4450
|
-
|
4451
4939
|
while (change[0] === '+') {
|
4452
4940
|
changes.push(change);
|
4453
4941
|
change = state.lines[++state.index];
|
4454
4942
|
}
|
4455
4943
|
}
|
4456
|
-
|
4457
4944
|
if (match.substr(1) === change.substr(1)) {
|
4458
4945
|
changes.push(change);
|
4459
4946
|
state.index++;
|
@@ -4461,44 +4948,35 @@ function lib_collectContext(state, matchChanges) {
|
|
4461
4948
|
conflicted = true;
|
4462
4949
|
}
|
4463
4950
|
}
|
4464
|
-
|
4465
4951
|
if ((matchChanges[matchIndex] || '')[0] === '+' && contextChanges) {
|
4466
4952
|
conflicted = true;
|
4467
4953
|
}
|
4468
|
-
|
4469
4954
|
if (conflicted) {
|
4470
4955
|
return changes;
|
4471
4956
|
}
|
4472
|
-
|
4473
4957
|
while (matchIndex < matchChanges.length) {
|
4474
4958
|
merged.push(matchChanges[matchIndex++]);
|
4475
4959
|
}
|
4476
|
-
|
4477
4960
|
return {
|
4478
4961
|
merged: merged,
|
4479
4962
|
changes: changes
|
4480
4963
|
};
|
4481
4964
|
}
|
4482
|
-
|
4483
4965
|
function lib_allRemoves(changes) {
|
4484
4966
|
return changes.reduce(function (prev, change) {
|
4485
4967
|
return prev && change[0] === '-';
|
4486
4968
|
}, true);
|
4487
4969
|
}
|
4488
|
-
|
4489
4970
|
function lib_skipRemoveSuperset(state, removeChanges, delta) {
|
4490
4971
|
for (var i = 0; i < delta; i++) {
|
4491
4972
|
var changeContent = removeChanges[removeChanges.length - delta + i].substr(1);
|
4492
|
-
|
4493
4973
|
if (state.lines[state.index + i] !== ' ' + changeContent) {
|
4494
4974
|
return false;
|
4495
4975
|
}
|
4496
4976
|
}
|
4497
|
-
|
4498
4977
|
state.index += delta;
|
4499
4978
|
return true;
|
4500
4979
|
}
|
4501
|
-
|
4502
4980
|
function lib_calcOldNewLineCount(lines) {
|
4503
4981
|
var oldLines = 0;
|
4504
4982
|
var newLines = 0;
|
@@ -4506,7 +4984,6 @@ function lib_calcOldNewLineCount(lines) {
|
|
4506
4984
|
if (typeof line !== 'string') {
|
4507
4985
|
var myCount = lib_calcOldNewLineCount(line.mine);
|
4508
4986
|
var theirCount = lib_calcOldNewLineCount(line.theirs);
|
4509
|
-
|
4510
4987
|
if (oldLines !== undefined) {
|
4511
4988
|
if (myCount.oldLines === theirCount.oldLines) {
|
4512
4989
|
oldLines += myCount.oldLines;
|
@@ -4514,7 +4991,6 @@ function lib_calcOldNewLineCount(lines) {
|
|
4514
4991
|
oldLines = undefined;
|
4515
4992
|
}
|
4516
4993
|
}
|
4517
|
-
|
4518
4994
|
if (newLines !== undefined) {
|
4519
4995
|
if (myCount.newLines === theirCount.newLines) {
|
4520
4996
|
newLines += myCount.newLines;
|
@@ -4526,7 +5002,6 @@ function lib_calcOldNewLineCount(lines) {
|
|
4526
5002
|
if (newLines !== undefined && (line[0] === '+' || line[0] === ' ')) {
|
4527
5003
|
newLines++;
|
4528
5004
|
}
|
4529
|
-
|
4530
5005
|
if (oldLines !== undefined && (line[0] === '-' || line[0] === ' ')) {
|
4531
5006
|
oldLines++;
|
4532
5007
|
}
|
@@ -4538,15 +5013,42 @@ function lib_calcOldNewLineCount(lines) {
   };
 }
 
+function reversePatch(structuredPatch) {
+  if (Array.isArray(structuredPatch)) {
+    return structuredPatch.map(reversePatch).reverse();
+  }
+  return _objectSpread2(_objectSpread2({}, structuredPatch), {}, {
+    oldFileName: structuredPatch.newFileName,
+    oldHeader: structuredPatch.newHeader,
+    newFileName: structuredPatch.oldFileName,
+    newHeader: structuredPatch.oldHeader,
+    hunks: structuredPatch.hunks.map(function (hunk) {
+      return {
+        oldLines: hunk.newLines,
+        oldStart: hunk.newStart,
+        newLines: hunk.oldLines,
+        newStart: hunk.oldStart,
+        lines: hunk.lines.map(function (l) {
+          if (l.startsWith('-')) {
+            return "+".concat(l.slice(1));
+          }
+          if (l.startsWith('+')) {
+            return "-".concat(l.slice(1));
+          }
+          return l;
+        })
+      };
+    })
+  });
+}
+
 // See: http://code.google.com/p/google-diff-match-patch/wiki/API
 function lib_convertChangesToDMP(changes) {
   var ret = [],
-
-
-
+    change,
+    operation;
   for (var i = 0; i < changes.length; i++) {
     change = changes[i];
-
     if (change.added) {
       operation = 1;
     } else if (change.removed) {
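`reversePatch` is new in this bundle (jsdiff exposes it under the same name): it swaps the old/new file metadata and flips each `+`/`-` line, so applying the reversed patch undoes the original change. A usage sketch against the standalone package (sample strings are made up):

```js
const { structuredPatch, reversePatch, applyPatch } = require('diff');

const forward = structuredPatch('f.txt', 'f.txt', 'old\n', 'new\n', '', '');
const backward = reversePatch(forward);
console.log(applyPatch('new\n', backward)); // "old\n"
```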
|
4554
5056
|
} else {
|
4555
5057
|
operation = 0;
|
4556
5058
|
}
|
4557
|
-
|
4558
5059
|
ret.push([operation, change.value]);
|
4559
5060
|
}
|
4560
|
-
|
4561
5061
|
return ret;
|
4562
5062
|
}
|
4563
5063
|
|
4564
5064
|
function lib_convertChangesToXML(changes) {
|
4565
5065
|
var ret = [];
|
4566
|
-
|
4567
5066
|
for (var i = 0; i < changes.length; i++) {
|
4568
5067
|
var change = changes[i];
|
4569
|
-
|
4570
5068
|
if (change.added) {
|
4571
5069
|
ret.push('<ins>');
|
4572
5070
|
} else if (change.removed) {
|
4573
5071
|
ret.push('<del>');
|
4574
5072
|
}
|
4575
|
-
|
4576
5073
|
ret.push(lib_escapeHTML(change.value));
|
4577
|
-
|
4578
5074
|
if (change.added) {
|
4579
5075
|
ret.push('</ins>');
|
4580
5076
|
} else if (change.removed) {
|
4581
5077
|
ret.push('</del>');
|
4582
5078
|
}
|
4583
5079
|
}
|
4584
|
-
|
4585
5080
|
return ret.join('');
|
4586
5081
|
}
|
4587
|
-
|
4588
5082
|
function lib_escapeHTML(s) {
|
4589
5083
|
var n = s;
|
4590
5084
|
n = n.replace(/&/g, '&');
|
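`lib_convertChangesToXML` and `lib_escapeHTML` match jsdiff's public `convertChangesToXML`, which wraps additions in `<ins>` and removals in `<del>` after HTML-escaping. A short sketch (the expected output shown in the comment is approximate):

```js
const { diffWords, convertChangesToXML } = require('diff');

console.log(convertChangesToXML(diffWords('old text', 'new text')));
// roughly: "<del>old</del><ins>new</ins> text"
```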
@@ -4921,6 +5415,7 @@ class FileListRenderer {
 
 
 
+
 const defaultLineByLineRendererConfig = Object.assign(Object.assign({}, defaultRenderConfig), { renderNothingWhenEmpty: false, matchingMaxComparisons: 2500, maxLineSizeInBlockForComparison: 200 });
 const genericTemplatesPath = 'generic';
 const line_by_line_renderer_baseTemplatesPath = 'line-by-line';
@@ -5051,7 +5546,7 @@ class LineByLineRenderer {
     }
     applyRematchMatching(oldLines, newLines, matcher) {
         const comparisons = oldLines.length * newLines.length;
-        const maxLineSizeInBlock =
+        const maxLineSizeInBlock = max(oldLines.concat(newLines).map(elem => elem.content.length));
         const doMatching = comparisons < this.config.matchingMaxComparisons &&
             maxLineSizeInBlock < this.config.maxLineSizeInBlockForComparison &&
             (this.config.matching === 'lines' || this.config.matching === 'words');
@@ -5121,6 +5616,7 @@ class LineByLineRenderer {
 
 
 
+
 const defaultSideBySideRendererConfig = Object.assign(Object.assign({}, defaultRenderConfig), { renderNothingWhenEmpty: false, matchingMaxComparisons: 2500, maxLineSizeInBlockForComparison: 200 });
 const side_by_side_renderer_genericTemplatesPath = 'generic';
 const side_by_side_renderer_baseTemplatesPath = 'side-by-side';
@@ -5260,7 +5756,7 @@ class SideBySideRenderer {
     }
     applyRematchMatching(oldLines, newLines, matcher) {
        const comparisons = oldLines.length * newLines.length;
-        const maxLineSizeInBlock =
+        const maxLineSizeInBlock = max(oldLines.concat(newLines).map(elem => elem.content.length));
         const doMatching = comparisons < this.config.matchingMaxComparisons &&
             maxLineSizeInBlock < this.config.maxLineSizeInBlockForComparison &&
             (this.config.matching === 'lines' || this.config.matching === 'words');
@@ -5390,7 +5886,7 @@ class HoganJsUtils {
             const template = this.preCompiledTemplates[templateKey];
             return template.render(params, partials, indent);
         }
-        catch (
+        catch (_e) {
             throw new Error(`Could not find template to render '${templateKey}'`);
         }
     }