bkui-vue 2.0.2-beta.47 → 2.0.2-beta.49
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
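For local verification, a comparable diff can usually be generated with npm's built-in diff command (npm 7 and later); the output formatting will differ from the rendered view below:

npm diff --diff=bkui-vue@2.0.2-beta.47 --diff=bkui-vue@2.0.2-beta.49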
- package/dist/index.cjs.js +42 -47
- package/dist/index.esm.js +7009 -7201
- package/dist/index.umd.js +45 -50
- package/lib/affix/index.js +14 -12
- package/lib/alert/index.js +14 -12
- package/lib/animate-number/index.js +12 -11
- package/lib/backtop/index.js +11 -10
- package/lib/badge/index.js +14 -12
- package/lib/breadcrumb/index.js +14 -12
- package/lib/button/index.js +14 -12
- package/lib/card/index.js +14 -12
- package/lib/cascader/index.js +101 -94
- package/lib/checkbox/index.js +35 -37
- package/lib/code-diff/index.js +674 -1170
- package/lib/collapse/index.js +42 -44
- package/lib/collapse-transition/index.js +4 -3
- package/lib/color-picker/index.js +60 -59
- package/lib/config-provider/index.js +14 -12
- package/lib/container/index.js +25 -21
- package/lib/date-picker/index.js +51 -58
- package/lib/dialog/index.js +441 -43
- package/lib/directives/index.js +34 -37
- package/lib/divider/index.js +3 -2
- package/lib/dropdown/index.js +21 -17
- package/lib/exception/index.js +17 -14
- package/lib/fixed-navbar/index.js +3 -2
- package/lib/form/index.js +34 -37
- package/lib/icon/angle-double-down-line.js +36 -29
- package/lib/icon/angle-double-left-line.js +36 -29
- package/lib/icon/angle-double-left.js +36 -29
- package/lib/icon/angle-double-right-line.js +36 -29
- package/lib/icon/angle-double-right.js +36 -29
- package/lib/icon/angle-double-up-line.js +36 -29
- package/lib/icon/angle-down-fill.js +36 -29
- package/lib/icon/angle-down-line.js +36 -29
- package/lib/icon/angle-down.js +36 -29
- package/lib/icon/angle-left.js +36 -29
- package/lib/icon/angle-right.js +36 -29
- package/lib/icon/angle-up-fill.js +36 -29
- package/lib/icon/angle-up.js +36 -29
- package/lib/icon/archive-fill.js +36 -29
- package/lib/icon/arrows-left.js +36 -29
- package/lib/icon/arrows-right.js +36 -29
- package/lib/icon/assistant.js +36 -29
- package/lib/icon/audio-fill.js +36 -29
- package/lib/icon/bk.js +36 -29
- package/lib/icon/circle.js +36 -29
- package/lib/icon/close-line.js +36 -29
- package/lib/icon/close.js +36 -29
- package/lib/icon/code.js +36 -29
- package/lib/icon/cog-shape.js +36 -29
- package/lib/icon/collapse-left.js +36 -29
- package/lib/icon/copy-shape.js +36 -29
- package/lib/icon/copy.js +36 -29
- package/lib/icon/data-shape.js +36 -29
- package/lib/icon/del.js +36 -29
- package/lib/icon/doc-fill.js +36 -29
- package/lib/icon/done.js +36 -29
- package/lib/icon/down-shape.js +36 -29
- package/lib/icon/down-small.js +36 -29
- package/lib/icon/edit-line.js +36 -29
- package/lib/icon/ellipsis.js +36 -29
- package/lib/icon/enlarge-line.js +36 -29
- package/lib/icon/error.js +36 -29
- package/lib/icon/excel-fill.js +36 -29
- package/lib/icon/exclamation-circle-shape.js +36 -29
- package/lib/icon/eye.js +36 -29
- package/lib/icon/filliscreen-line.js +36 -29
- package/lib/icon/fix-line.js +36 -29
- package/lib/icon/fix-shape.js +36 -29
- package/lib/icon/folder-open.js +36 -29
- package/lib/icon/folder-shape-open.js +36 -29
- package/lib/icon/folder-shape.js +36 -29
- package/lib/icon/folder.js +36 -29
- package/lib/icon/funnel.js +36 -29
- package/lib/icon/grag-fill.js +36 -29
- package/lib/icon/help-document-fill.js +36 -29
- package/lib/icon/help-fill.js +36 -29
- package/lib/icon/help.js +36 -29
- package/lib/icon/image-fill.js +36 -29
- package/lib/icon/img-error.js +36 -29
- package/lib/icon/img-placehoulder.js +36 -29
- package/lib/icon/info-line.js +36 -29
- package/lib/icon/info.js +36 -29
- package/lib/icon/left-shape.js +36 -29
- package/lib/icon/left-turn-line.js +36 -29
- package/lib/icon/loading.js +36 -29
- package/lib/icon/narrow-line.js +36 -29
- package/lib/icon/original.js +36 -29
- package/lib/icon/pdf-fill.js +36 -29
- package/lib/icon/play-shape.js +36 -29
- package/lib/icon/plus.js +36 -29
- package/lib/icon/ppt-fill.js +36 -29
- package/lib/icon/qq.js +36 -29
- package/lib/icon/right-shape.js +36 -29
- package/lib/icon/right-turn-line.js +36 -29
- package/lib/icon/search.js +36 -29
- package/lib/icon/share.js +36 -29
- package/lib/icon/spinner.js +36 -29
- package/lib/icon/success.js +36 -29
- package/lib/icon/switcher-loading.js +36 -29
- package/lib/icon/text-all.js +36 -29
- package/lib/icon/text-file.js +36 -29
- package/lib/icon/text-fill.js +36 -29
- package/lib/icon/transfer.js +36 -29
- package/lib/icon/tree-application-shape.js +36 -29
- package/lib/icon/unfull-screen.js +36 -29
- package/lib/icon/unvisible.js +36 -29
- package/lib/icon/up-shape.js +36 -29
- package/lib/icon/upload.js +36 -29
- package/lib/icon/video-fill.js +36 -29
- package/lib/icon/warn.js +36 -29
- package/lib/icon/weixin-pro.js +36 -29
- package/lib/icon/weixin.js +36 -29
- package/lib/image/index.js +47 -49
- package/lib/index.js +1 -1
- package/lib/info-box/index.js +461 -62
- package/lib/input/index.js +60 -62
- package/lib/link/index.js +2 -2
- package/lib/loading/index.js +25 -22
- package/lib/menu/index.js +34 -36
- package/lib/message/index.js +31 -34
- package/lib/modal/index.js +14 -12
- package/lib/navigation/index.js +9 -7
- package/lib/notify/index.js +14 -12
- package/lib/overflow-title/index.js +10 -9
- package/lib/pagination/index.js +29 -32
- package/lib/pop-confirm/index.js +5 -4
- package/lib/popover/index.js +136 -209
- package/lib/popover2/index.js +2 -2
- package/lib/process/index.js +452 -59
- package/lib/progress/index.js +19 -16
- package/lib/radio/index.js +34 -37
- package/lib/rate/index.js +14 -12
- package/lib/resize-layout/index.js +20 -18
- package/lib/scrollbar/index.js +58 -51
- package/lib/search-select/index.js +595 -222
- package/lib/select/index.js +463 -111
- package/lib/shared/index.js +95 -83
- package/lib/sideslider/index.js +441 -43
- package/lib/slider/index.js +446 -49
- package/lib/steps/index.js +458 -60
- package/lib/swiper/index.js +17 -15
- package/lib/switcher/index.js +14 -12
- package/lib/tab/index.js +40 -42
- package/lib/table/index.js +501 -152
- package/lib/tag/index.js +14 -12
- package/lib/tag-input/index.js +41 -47
- package/lib/timeline/index.js +0 -1
- package/lib/transfer/index.js +17 -22
- package/lib/tree/index.js +66 -73
- package/lib/upload/index.js +487 -136
- package/lib/virtual-render/index.js +51 -47
- package/package.json +1 -2
package/lib/code-diff/index.js
CHANGED
@@ -1004,10 +1004,9 @@ var x = y => { var x = {}; __webpack_require__.d(x, y); return x; }
The @babel/runtime helper _arrayWithHoles is rewritten; the new version reads function _arrayWithHoles(arr) { if (Array.isArray(arr)) return arr; } and the blank line that followed it is removed.
@@ -1036,38 +1035,34 @@ function _iterableToArrayLimit(r, l) {
The @babel/runtime helpers _arrayLikeToArray, _unsupportedIterableToArray and _slicedToArray are rewritten with the parameter lists (arr, len), (o, minLen) and (arr, i) respectively, and the blank lines separating the concatenated helper modules are removed.
@@ -2698,14 +2693,6 @@ function hashCode(text) {
The max(arr) helper that followed hashCode in utils.js (a Math.max loop over the array) is removed.
@@ -3022,8 +3009,7 @@ function parse(diffInput, config = {}) {
In the diff2html diff-parser, the assignment currentFile.mode = values[3] becomes conditional: values[3] && (currentFile.mode = values[3]).
@@ -3051,245 +3037,211 @@ function parse(diffInput, config = {}) {
The bundled jsdiff core (lib_Diff.prototype) reverts to an older implementation: diff() stores options on this.options, tracks progress with newPos and a components array instead of oldPos and a lastComponent chain, drops the maxExecutionTime/abortAfterTimestamp handling and the minDiagonalToConsider/maxDiagonalToConsider optimization, and its done() callback now invokes callback(undefined, value); pushComponent and extractCommon are rewritten around the components array; equals reads comparator and ignoreCase from this.options; tokenize returns value.split(''); the postProcess hook and the oneChangePerToken handling are removed.
@@ -3301,133 +3253,66 @@ function lib_buildValues(diff, lastComponent, newString, oldString, useLongestTo
lib_buildValues now iterates a components array rather than walking a lastComponent/previousComponent chain, swaps adjacent add/remove components so removals are emitted first, and merges a trailing ignored terminal into the preceding component; lib_clonePath is reintroduced; lib_generateOptions moves up next to lib_diffChars; the string helpers longestCommonSuffix, replacePrefix, replaceSuffix, removePrefix, removeSuffix, maximumOverlap, overlapCount, hasOnlyWinLineEndings and hasOnlyUnixLineEndings are removed.
@@ -3445,330 +3330,77 @@ function hasOnlyUnixLineEndings(string)
The word- and line-level diff setup reverts to the older jsdiff form: lib_extendedWordChars becomes the anchored regex /^[A-Za-z\xC0-\u02C6\u02C8-\u02D7\u02DE-\u02FF\u1E00-\u1EFF]+$/ alongside lib_reWhitespace = /\S/; lib_wordDiff.equals and tokenize read this.options and use a split-based tokenizer that merges extended-Latin boundary splits; the tokenizeIncludingWhitespace regex, the Intl.Segmenter path, the whitespace-stitching join and postProcess overrides, dedupeWhitespaceInChangeObjects and wordWithSpaceDiff are all removed; lib_diffWords goes back to lib_generateOptions(options, { ignoreWhitespace: true }) and lib_diffWordsWithSpace simply calls lib_wordDiff.diff; lib_lineDiff.tokenize handles newlineIsToken and ignoreWhitespace (line.trim()) inline, and the separate equals override with ignoreNewlineAtEof and \r\n stripping is removed.
@@ -3777,88 +3409,53 @@ function lib_diffTrimmedLines(oldStr, newStr, callback) {
lib_sentenceDiff and lib_cssDiff change only in spacing; the ownKeys, _objectSpread2, _toPrimitive, _toPropertyKey and _defineProperty helpers are removed; lib_typeof reverts to the self-redefining typeof polyfill, and lib_iterableToArray now checks Symbol.iterator in Object(iter).
@@ -3867,263 +3464,238 @@ function diff_lib_unsupportedIterableToArray(o, minLen) {
|
|
3867
3464
|
if (n === "Map" || n === "Set") return Array.from(o);
|
3868
3465
|
if (n === "Arguments" || /^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)) return diff_lib_arrayLikeToArray(o, minLen);
|
3869
3466
|
}
|
3467
|
+
|
3870
3468
|
function diff_lib_arrayLikeToArray(arr, len) {
|
3871
3469
|
if (len == null || len > arr.length) len = arr.length;
|
3470
|
+
|
3872
3471
|
for (var i = 0, arr2 = new Array(len); i < len; i++) arr2[i] = arr[i];
|
3472
|
+
|
3873
3473
|
return arr2;
|
3874
3474
|
}
|
3475
|
+
|
3875
3476
|
function lib_nonIterableSpread() {
|
3876
3477
|
 throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.");
 }
 
-var
-// Discriminate between two lines of pretty-printed, serialized JSON where one of them has a
+var lib_objectPrototypeToString = Object.prototype.toString;
+var lib_jsonDiff = new lib_Diff(); // Discriminate between two lines of pretty-printed, serialized JSON where one of them has a
 // dangling comma and the other doesn't. Turns out including the dangling comma yields the nicest output:
+
 lib_jsonDiff.useLongestToken = true;
 lib_jsonDiff.tokenize = lib_lineDiff.tokenize;
-
-
-
-
-
-
+
+lib_jsonDiff.castInput = function (value) {
+var _this$options = this.options,
+undefinedReplacement = _this$options.undefinedReplacement,
+_this$options$stringi = _this$options.stringifyReplacer,
+stringifyReplacer = _this$options$stringi === void 0 ? function (k, v) {
+return typeof v === 'undefined' ? undefinedReplacement : v;
+} : _this$options$stringi;
 return typeof value === 'string' ? value : JSON.stringify(lib_canonicalize(value, null, null, stringifyReplacer), stringifyReplacer, ' ');
 };
-
-
+
+lib_jsonDiff.equals = function (left, right) {
+return lib_Diff.prototype.equals.call(lib_jsonDiff, left.replace(/,([\r\n])/g, '$1'), right.replace(/,([\r\n])/g, '$1'));
 };
+
 function lib_diffJson(oldObj, newObj, options) {
 return lib_jsonDiff.diff(oldObj, newObj, options);
-}
-
-// This function handles the presence of circular references by bailing out when encountering an
+} // This function handles the presence of circular references by bailing out when encountering an
 // object that is already on the "stack" of items being processed. Accepts an optional replacer
+
 function lib_canonicalize(obj, stack, replacementStack, replacer, key) {
 stack = stack || [];
 replacementStack = replacementStack || [];
+
 if (replacer) {
 obj = replacer(key, obj);
 }
+
 var i;
+
 for (i = 0; i < stack.length; i += 1) {
 if (stack[i] === obj) {
 return replacementStack[i];
 }
 }
+
 var canonicalizedObj;
-
+
+if ('[object Array]' === lib_objectPrototypeToString.call(obj)) {
 stack.push(obj);
 canonicalizedObj = new Array(obj.length);
 replacementStack.push(canonicalizedObj);
+
 for (i = 0; i < obj.length; i += 1) {
 canonicalizedObj[i] = lib_canonicalize(obj[i], stack, replacementStack, replacer, key);
 }
+
 stack.pop();
 replacementStack.pop();
 return canonicalizedObj;
 }
+
 if (obj && obj.toJSON) {
 obj = obj.toJSON();
 }
+
 if (lib_typeof(obj) === 'object' && obj !== null) {
 stack.push(obj);
 canonicalizedObj = {};
 replacementStack.push(canonicalizedObj);
+
 var sortedKeys = [],
-
+_key;
+
 for (_key in obj) {
 /* istanbul ignore else */
-if (
+if (obj.hasOwnProperty(_key)) {
 sortedKeys.push(_key);
 }
 }
+
 sortedKeys.sort();
+
 for (i = 0; i < sortedKeys.length; i += 1) {
 _key = sortedKeys[i];
 canonicalizedObj[_key] = lib_canonicalize(obj[_key], stack, replacementStack, replacer, _key);
 }
+
 stack.pop();
 replacementStack.pop();
 } else {
 canonicalizedObj = obj;
 }
+
 return canonicalizedObj;
 }
 
 var lib_arrayDiff = new lib_Diff();
+
 lib_arrayDiff.tokenize = function (value) {
 return value.slice();
 };
+
 lib_arrayDiff.join = lib_arrayDiff.removeEmpty = function (value) {
 return value;
 };
+
 function lib_diffArrays(oldArr, newArr, callback) {
 return lib_arrayDiff.diff(oldArr, newArr, callback);
 }
 
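A quick orientation note on the two helpers above: lib_diffJson and lib_diffArrays (the bundle-internal names of the diff library's diffJson/diffArrays) both return an array of change objects carrying value, added and removed fields, which is the shape the convertChangesToXML/convertChangesToDMP helpers further down rely on. A minimal, illustrative sketch of calling them (the sample inputs are invented):

```js
// Illustrative sketch only; assumes the bundled helpers above are in scope.
// lib_diffJson serializes both objects via lib_canonicalize (sorted keys)
// and then diffs the serialized text line by line.
var jsonChanges = lib_diffJson({ a: 1, b: 2 }, { a: 1, b: 3 });

jsonChanges.forEach(function (change) {
  // Each change object carries { value, added, removed }.
  var marker = change.added ? '+' : change.removed ? '-' : ' ';
  console.log(marker + ' ' + change.value);
});

// lib_diffArrays tokenizes by element (value.slice() above), so changes
// group whole array items rather than characters.
var arrayChanges = lib_diffArrays(['a', 'b', 'c'], ['a', 'c', 'd']);
```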
-function unixToWin(patch) {
-if (Array.isArray(patch)) {
-return patch.map(unixToWin);
-}
-return _objectSpread2(_objectSpread2({}, patch), {}, {
-hunks: patch.hunks.map(function (hunk) {
-return _objectSpread2(_objectSpread2({}, hunk), {}, {
-lines: hunk.lines.map(function (line, i) {
-var _hunk$lines;
-return line.startsWith('\\') || line.endsWith('\r') || (_hunk$lines = hunk.lines[i + 1]) !== null && _hunk$lines !== void 0 && _hunk$lines.startsWith('\\') ? line : line + '\r';
-})
-});
-})
-});
-}
-function winToUnix(patch) {
-if (Array.isArray(patch)) {
-return patch.map(winToUnix);
-}
-return _objectSpread2(_objectSpread2({}, patch), {}, {
-hunks: patch.hunks.map(function (hunk) {
-return _objectSpread2(_objectSpread2({}, hunk), {}, {
-lines: hunk.lines.map(function (line) {
-return line.endsWith('\r') ? line.substring(0, line.length - 1) : line;
-})
-});
-})
-});
-}
-
-/**
-* Returns true if the patch consistently uses Unix line endings (or only involves one line and has
-* no line endings).
-*/
-function isUnix(patch) {
-if (!Array.isArray(patch)) {
-patch = [patch];
-}
-return !patch.some(function (index) {
-return index.hunks.some(function (hunk) {
-return hunk.lines.some(function (line) {
-return !line.startsWith('\\') && line.endsWith('\r');
-});
-});
-});
-}
-
-/**
-* Returns true if the patch uses Windows line endings and only Windows line endings.
-*/
-function isWin(patch) {
-if (!Array.isArray(patch)) {
-patch = [patch];
-}
-return patch.some(function (index) {
-return index.hunks.some(function (hunk) {
-return hunk.lines.some(function (line) {
-return line.endsWith('\r');
-});
-});
-}) && patch.every(function (index) {
-return index.hunks.every(function (hunk) {
-return hunk.lines.every(function (line, i) {
-var _hunk$lines2;
-return line.startsWith('\\') || line.endsWith('\r') || ((_hunk$lines2 = hunk.lines[i + 1]) === null || _hunk$lines2 === void 0 ? void 0 : _hunk$lines2.startsWith('\\'));
-});
-});
-});
-}
-
4034
3587
|
function lib_parsePatch(uniDiff) {
|
4035
|
-
var
|
4036
|
-
|
4037
|
-
|
3588
|
+
var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {};
|
3589
|
+
var diffstr = uniDiff.split(/\r\n|[\n\v\f\r\x85]/),
|
3590
|
+
delimiters = uniDiff.match(/\r\n|[\n\v\f\r\x85]/g) || [],
|
3591
|
+
list = [],
|
3592
|
+
i = 0;
|
3593
|
+
|
4038
3594
|
function parseIndex() {
|
4039
3595
|
var index = {};
|
4040
|
-
list.push(index);
|
3596
|
+
list.push(index); // Parse diff metadata
|
4041
3597
|
|
4042
|
-
// Parse diff metadata
|
4043
3598
|
while (i < diffstr.length) {
|
4044
|
-
var line = diffstr[i];
|
3599
|
+
var line = diffstr[i]; // File header found, end parsing diff metadata
|
4045
3600
|
|
4046
|
-
// File header found, end parsing diff metadata
|
4047
3601
|
if (/^(\-\-\-|\+\+\+|@@)\s/.test(line)) {
|
4048
3602
|
break;
|
4049
|
-
}
|
3603
|
+
} // Diff index
|
3604
|
+
|
4050
3605
|
|
4051
|
-
// Diff index
|
4052
3606
|
var header = /^(?:Index:|diff(?: -r \w+)+)\s+(.+?)\s*$/.exec(line);
|
3607
|
+
|
4053
3608
|
if (header) {
|
4054
3609
|
index.index = header[1];
|
4055
3610
|
}
|
4056
|
-
i++;
|
4057
|
-
}
|
4058
3611
|
|
4059
|
-
|
3612
|
+
i++;
|
3613
|
+
} // Parse file headers if they are defined. Unified diff requires them, but
|
4060
3614
|
// there's no technical issues to have an isolated hunk without file header
|
3615
|
+
|
3616
|
+
|
4061
3617
|
parseFileHeader(index);
|
4062
|
-
parseFileHeader(index);
|
3618
|
+
parseFileHeader(index); // Parse hunks
|
4063
3619
|
|
4064
|
-
// Parse hunks
|
4065
3620
|
index.hunks = [];
|
3621
|
+
|
4066
3622
|
while (i < diffstr.length) {
|
4067
3623
|
var _line = diffstr[i];
|
4068
|
-
|
3624
|
+
|
3625
|
+
if (/^(Index:|diff|\-\-\-|\+\+\+)\s/.test(_line)) {
|
4069
3626
|
break;
|
4070
3627
|
} else if (/^@@/.test(_line)) {
|
4071
3628
|
index.hunks.push(parseHunk());
|
4072
|
-
} else if (_line) {
|
3629
|
+
} else if (_line && options.strict) {
|
3630
|
+
// Ignore unexpected content unless in strict mode
|
4073
3631
|
throw new Error('Unknown line ' + (i + 1) + ' ' + JSON.stringify(_line));
|
4074
3632
|
} else {
|
4075
3633
|
i++;
|
4076
3634
|
}
|
4077
3635
|
}
|
4078
|
-
}
|
4079
|
-
|
4080
|
-
// Parses the --- and +++ headers, if none are found, no lines
|
3636
|
+
} // Parses the --- and +++ headers, if none are found, no lines
|
4081
3637
|
// are consumed.
|
3638
|
+
|
3639
|
+
|
4082
3640
|
function parseFileHeader(index) {
|
4083
|
-
var fileHeader = /^(---|\+\+\+)\s+(.*)
|
3641
|
+
var fileHeader = /^(---|\+\+\+)\s+(.*)$/.exec(diffstr[i]);
|
3642
|
+
|
4084
3643
|
if (fileHeader) {
|
4085
3644
|
var keyPrefix = fileHeader[1] === '---' ? 'old' : 'new';
|
4086
3645
|
var data = fileHeader[2].split('\t', 2);
|
4087
3646
|
var fileName = data[0].replace(/\\\\/g, '\\');
|
3647
|
+
|
4088
3648
|
if (/^".*"$/.test(fileName)) {
|
4089
3649
|
fileName = fileName.substr(1, fileName.length - 2);
|
4090
3650
|
}
|
3651
|
+
|
4091
3652
|
index[keyPrefix + 'FileName'] = fileName;
|
4092
3653
|
index[keyPrefix + 'Header'] = (data[1] || '').trim();
|
4093
3654
|
i++;
|
4094
3655
|
}
|
4095
|
-
}
|
4096
|
-
|
4097
|
-
// Parses a hunk
|
3656
|
+
} // Parses a hunk
|
4098
3657
|
// This assumes that we are at the start of a hunk.
|
3658
|
+
|
3659
|
+
|
4099
3660
|
function parseHunk() {
|
4100
3661
|
var chunkHeaderIndex = i,
|
4101
|
-
|
4102
|
-
|
3662
|
+
chunkHeaderLine = diffstr[i++],
|
3663
|
+
chunkHeader = chunkHeaderLine.split(/@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@/);
|
4103
3664
|
var hunk = {
|
4104
3665
|
oldStart: +chunkHeader[1],
|
4105
3666
|
oldLines: typeof chunkHeader[2] === 'undefined' ? 1 : +chunkHeader[2],
|
4106
3667
|
newStart: +chunkHeader[3],
|
4107
3668
|
newLines: typeof chunkHeader[4] === 'undefined' ? 1 : +chunkHeader[4],
|
4108
|
-
lines: []
|
4109
|
-
|
4110
|
-
|
4111
|
-
// Unified Diff Format quirk: If the chunk size is 0,
|
3669
|
+
lines: [],
|
3670
|
+
linedelimiters: []
|
3671
|
+
}; // Unified Diff Format quirk: If the chunk size is 0,
|
4112
3672
|
// the first number is one lower than one would expect.
|
4113
3673
|
// https://www.artima.com/weblogs/viewpost.jsp?thread=164293
|
3674
|
+
|
4114
3675
|
if (hunk.oldLines === 0) {
|
4115
3676
|
hunk.oldStart += 1;
|
4116
3677
|
}
|
3678
|
+
|
4117
3679
|
if (hunk.newLines === 0) {
|
4118
3680
|
hunk.newStart += 1;
|
4119
3681
|
}
|
3682
|
+
|
4120
3683
|
var addCount = 0,
|
4121
|
-
|
4122
|
-
|
4123
|
-
|
3684
|
+
removeCount = 0;
|
3685
|
+
|
3686
|
+
for (; i < diffstr.length; i++) {
|
3687
|
+
// Lines starting with '---' could be mistaken for the "remove line" operation
|
3688
|
+
// But they could be the header for the next file. Therefore prune such cases out.
|
3689
|
+
if (diffstr[i].indexOf('--- ') === 0 && i + 2 < diffstr.length && diffstr[i + 1].indexOf('+++ ') === 0 && diffstr[i + 2].indexOf('@@') === 0) {
|
3690
|
+
break;
|
3691
|
+
}
|
3692
|
+
|
4124
3693
|
var operation = diffstr[i].length == 0 && i != diffstr.length - 1 ? ' ' : diffstr[i][0];
|
3694
|
+
|
4125
3695
|
if (operation === '+' || operation === '-' || operation === ' ' || operation === '\\') {
|
4126
3696
|
hunk.lines.push(diffstr[i]);
|
3697
|
+
hunk.linedelimiters.push(delimiters[i] || '\n');
|
3698
|
+
|
4127
3699
|
if (operation === '+') {
|
4128
3700
|
addCount++;
|
4129
3701
|
} else if (operation === '-') {
|
@@ -4133,30 +3705,37 @@ function lib_parsePatch(uniDiff) {
|
|
4133
3705
|
removeCount++;
|
4134
3706
|
}
|
4135
3707
|
} else {
|
4136
|
-
|
3708
|
+
break;
|
4137
3709
|
}
|
4138
|
-
}
|
3710
|
+
} // Handle the empty block count case
|
3711
|
+
|
4139
3712
|
|
4140
|
-
// Handle the empty block count case
|
4141
3713
|
if (!addCount && hunk.newLines === 1) {
|
4142
3714
|
hunk.newLines = 0;
|
4143
3715
|
}
|
3716
|
+
|
4144
3717
|
if (!removeCount && hunk.oldLines === 1) {
|
4145
3718
|
hunk.oldLines = 0;
|
4146
|
-
}
|
3719
|
+
} // Perform optional sanity checking
|
4147
3720
|
|
4148
|
-
|
4149
|
-
if (
|
4150
|
-
|
4151
|
-
|
4152
|
-
|
4153
|
-
|
3721
|
+
|
3722
|
+
if (options.strict) {
|
3723
|
+
if (addCount !== hunk.newLines) {
|
3724
|
+
throw new Error('Added line count did not match for hunk at line ' + (chunkHeaderIndex + 1));
|
3725
|
+
}
|
3726
|
+
|
3727
|
+
if (removeCount !== hunk.oldLines) {
|
3728
|
+
throw new Error('Removed line count did not match for hunk at line ' + (chunkHeaderIndex + 1));
|
3729
|
+
}
|
4154
3730
|
}
|
3731
|
+
|
4155
3732
|
return hunk;
|
4156
3733
|
}
|
3734
|
+
|
4157
3735
|
while (i < diffstr.length) {
|
4158
3736
|
parseIndex();
|
4159
3737
|
}
|
3738
|
+
|
4160
3739
|
return list;
|
4161
3740
|
}
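For orientation while reading the hunk parsing above: lib_parsePatch takes a unified-diff string and returns a list of file entries, each with oldFileName/newFileName (when --- / +++ headers are present) and a hunks array whose entries carry oldStart, oldLines, newStart, newLines, the raw lines and, in this restored variant, linedelimiters. A hedged sketch with a small, made-up patch string:

```js
// Illustrative only; the patch text is invented for the example.
var patchText = [
  '--- a.txt',
  '+++ a.txt',
  '@@ -1,2 +1,2 @@',
  ' first line',
  '-old second line',
  '+new second line',
  ''
].join('\n');

var parsed = lib_parsePatch(patchText);   // one entry per file in the patch
var hunk = parsed[0].hunks[0];
// parsed[0].oldFileName === 'a.txt', parsed[0].newFileName === 'a.txt'
// hunk.oldStart === 1, hunk.oldLines === 2, hunk.newStart === 1, hunk.newLines === 2
// hunk.lines => [' first line', '-old second line', '+new second line']
// A second argument of { strict: true } re-enables the sanity checks shown above.
```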
|
4162
3741
|
|
@@ -4165,275 +3744,210 @@ function lib_parsePatch(uniDiff) {
|
|
4165
3744
|
// start of 2, this will iterate 2, 3, 1, 4, 0.
|
4166
3745
|
function lib_distanceIterator (start, minLine, maxLine) {
|
4167
3746
|
var wantForward = true,
|
4168
|
-
|
4169
|
-
|
4170
|
-
|
3747
|
+
backwardExhausted = false,
|
3748
|
+
forwardExhausted = false,
|
3749
|
+
localOffset = 1;
|
4171
3750
|
return function iterator() {
|
4172
3751
|
if (wantForward && !forwardExhausted) {
|
4173
3752
|
if (backwardExhausted) {
|
4174
3753
|
localOffset++;
|
4175
3754
|
} else {
|
4176
3755
|
wantForward = false;
|
4177
|
-
}
|
4178
|
-
|
4179
|
-
// Check if trying to fit beyond text length, and if not, check it fits
|
3756
|
+
} // Check if trying to fit beyond text length, and if not, check it fits
|
4180
3757
|
// after offset location (or desired location on first iteration)
|
3758
|
+
|
3759
|
+
|
4181
3760
|
if (start + localOffset <= maxLine) {
|
4182
|
-
return
|
3761
|
+
return localOffset;
|
4183
3762
|
}
|
3763
|
+
|
4184
3764
|
forwardExhausted = true;
|
4185
3765
|
}
|
3766
|
+
|
4186
3767
|
if (!backwardExhausted) {
|
4187
3768
|
if (!forwardExhausted) {
|
4188
3769
|
wantForward = true;
|
4189
|
-
}
|
4190
|
-
|
4191
|
-
// Check if trying to fit before text beginning, and if not, check it fits
|
3770
|
+
} // Check if trying to fit before text beginning, and if not, check it fits
|
4192
3771
|
// before offset location
|
3772
|
+
|
3773
|
+
|
4193
3774
|
if (minLine <= start - localOffset) {
|
4194
|
-
return
|
3775
|
+
return -localOffset++;
|
4195
3776
|
}
|
3777
|
+
|
4196
3778
|
backwardExhausted = true;
|
4197
3779
|
return iterator();
|
4198
|
-
}
|
4199
|
-
|
4200
|
-
// We tried to fit hunk before text beginning and beyond text length, then
|
3780
|
+
} // We tried to fit hunk before text beginning and beyond text length, then
|
4201
3781
|
// hunk can't fit on the text. Return undefined
|
3782
|
+
|
4202
3783
|
};
|
4203
3784
|
}
|
4204
3785
|
|
4205
3786
|
function lib_applyPatch(source, uniDiff) {
|
4206
3787
|
var options = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {};
|
3788
|
+
|
4207
3789
|
if (typeof uniDiff === 'string') {
|
4208
3790
|
uniDiff = lib_parsePatch(uniDiff);
|
4209
3791
|
}
|
3792
|
+
|
4210
3793
|
if (Array.isArray(uniDiff)) {
|
4211
3794
|
if (uniDiff.length > 1) {
|
4212
3795
|
throw new Error('applyPatch only works with a single input.');
|
4213
3796
|
}
|
4214
|
-
uniDiff = uniDiff[0];
|
4215
|
-
}
|
4216
|
-
if (options.autoConvertLineEndings || options.autoConvertLineEndings == null) {
|
4217
|
-
if (hasOnlyWinLineEndings(source) && isUnix(uniDiff)) {
|
4218
|
-
uniDiff = unixToWin(uniDiff);
|
4219
|
-
} else if (hasOnlyUnixLineEndings(source) && isWin(uniDiff)) {
|
4220
|
-
uniDiff = winToUnix(uniDiff);
|
4221
|
-
}
|
4222
|
-
}
|
4223
|
-
|
4224
|
-
// Apply the diff to the input
|
4225
|
-
var lines = source.split('\n'),
|
4226
|
-
hunks = uniDiff.hunks,
|
4227
|
-
compareLine = options.compareLine || function (lineNumber, line, operation, patchContent) {
|
4228
|
-
return line === patchContent;
|
4229
|
-
},
|
4230
|
-
fuzzFactor = options.fuzzFactor || 0,
|
4231
|
-
minLine = 0;
|
4232
|
-
if (fuzzFactor < 0 || !Number.isInteger(fuzzFactor)) {
|
4233
|
-
throw new Error('fuzzFactor must be a non-negative integer');
|
4234
|
-
}
|
4235
3797
|
|
4236
|
-
|
4237
|
-
|
4238
|
-
return source;
|
4239
|
-
}
|
3798
|
+
uniDiff = uniDiff[0];
|
3799
|
+
} // Apply the diff to the input
|
4240
3800
|
|
4241
|
-
// Before anything else, handle EOFNL insertion/removal. If the patch tells us to make a change
|
4242
|
-
// to the EOFNL that is redundant/impossible - i.e. to remove a newline that's not there, or add a
|
4243
|
-
// newline that already exists - then we either return false and fail to apply the patch (if
|
4244
|
-
// fuzzFactor is 0) or simply ignore the problem and do nothing (if fuzzFactor is >0).
|
4245
|
-
// If we do need to remove/add a newline at EOF, this will always be in the final hunk:
|
4246
|
-
var prevLine = '',
|
4247
|
-
removeEOFNL = false,
|
4248
|
-
addEOFNL = false;
|
4249
|
-
for (var i = 0; i < hunks[hunks.length - 1].lines.length; i++) {
|
4250
|
-
var line = hunks[hunks.length - 1].lines[i];
|
4251
|
-
if (line[0] == '\\') {
|
4252
|
-
if (prevLine[0] == '+') {
|
4253
|
-
removeEOFNL = true;
|
4254
|
-
} else if (prevLine[0] == '-') {
|
4255
|
-
addEOFNL = true;
|
4256
|
-
}
|
4257
|
-
}
|
4258
|
-
prevLine = line;
|
4259
|
-
}
|
4260
|
-
if (removeEOFNL) {
|
4261
|
-
if (addEOFNL) {
|
4262
|
-
// This means the final line gets changed but doesn't have a trailing newline in either the
|
4263
|
-
// original or patched version. In that case, we do nothing if fuzzFactor > 0, and if
|
4264
|
-
// fuzzFactor is 0, we simply validate that the source file has no trailing newline.
|
4265
|
-
if (!fuzzFactor && lines[lines.length - 1] == '') {
|
4266
|
-
return false;
|
4267
|
-
}
|
4268
|
-
} else if (lines[lines.length - 1] == '') {
|
4269
|
-
lines.pop();
|
4270
|
-
} else if (!fuzzFactor) {
|
4271
|
-
return false;
|
4272
|
-
}
|
4273
|
-
} else if (addEOFNL) {
|
4274
|
-
if (lines[lines.length - 1] != '') {
|
4275
|
-
lines.push('');
|
4276
|
-
} else if (!fuzzFactor) {
|
4277
|
-
return false;
|
4278
|
-
}
|
4279
|
-
}
|
4280
3801
|
|
3802
|
+
var lines = source.split(/\r\n|[\n\v\f\r\x85]/),
|
3803
|
+
delimiters = source.match(/\r\n|[\n\v\f\r\x85]/g) || [],
|
3804
|
+
hunks = uniDiff.hunks,
|
3805
|
+
compareLine = options.compareLine || function (lineNumber, line, operation, patchContent) {
|
3806
|
+
return line === patchContent;
|
3807
|
+
},
|
3808
|
+
errorCount = 0,
|
3809
|
+
fuzzFactor = options.fuzzFactor || 0,
|
3810
|
+
minLine = 0,
|
3811
|
+
offset = 0,
|
3812
|
+
removeEOFNL,
|
3813
|
+
addEOFNL;
|
4281
3814
|
/**
|
4282
|
-
* Checks if the hunk
|
4283
|
-
* insertions, substitutions, or deletions, while ensuring also that:
|
4284
|
-
* - lines deleted in the hunk match exactly, and
|
4285
|
-
* - wherever an insertion operation or block of insertion operations appears in the hunk, the
|
4286
|
-
* immediately preceding and following lines of context match exactly
|
4287
|
-
*
|
4288
|
-
* `toPos` should be set such that lines[toPos] is meant to match hunkLines[0].
|
4289
|
-
*
|
4290
|
-
* If the hunk can be applied, returns an object with properties `oldLineLastI` and
|
4291
|
-
* `replacementLines`. Otherwise, returns null.
|
3815
|
+
* Checks if the hunk exactly fits on the provided location
|
4292
3816
|
*/
|
4293
|
-
|
4294
|
-
|
4295
|
-
|
4296
|
-
var
|
4297
|
-
|
4298
|
-
|
4299
|
-
|
4300
|
-
|
4301
|
-
|
4302
|
-
|
4303
|
-
|
4304
|
-
|
4305
|
-
|
4306
|
-
|
4307
|
-
|
4308
|
-
} else {
|
4309
|
-
if (!maxErrors || lines[toPos] == null) {
|
4310
|
-
return null;
|
3817
|
+
|
3818
|
+
|
3819
|
+
function hunkFits(hunk, toPos) {
|
3820
|
+
for (var j = 0; j < hunk.lines.length; j++) {
|
3821
|
+
var line = hunk.lines[j],
|
3822
|
+
operation = line.length > 0 ? line[0] : ' ',
|
3823
|
+
content = line.length > 0 ? line.substr(1) : line;
|
3824
|
+
|
3825
|
+
if (operation === ' ' || operation === '-') {
|
3826
|
+
// Context sanity check
|
3827
|
+
if (!compareLine(toPos + 1, lines[toPos], operation, content)) {
|
3828
|
+
errorCount++;
|
3829
|
+
|
3830
|
+
if (errorCount > fuzzFactor) {
|
3831
|
+
return false;
|
4311
3832
|
}
|
4312
|
-
patchedLines[patchedLinesLength] = lines[toPos];
|
4313
|
-
return applyHunk(hunkLines, toPos + 1, maxErrors - 1, hunkLinesI, false, patchedLines, patchedLinesLength + 1);
|
4314
3833
|
}
|
4315
|
-
}
|
4316
|
-
if (operation === '+') {
|
4317
|
-
if (!lastContextLineMatched) {
|
4318
|
-
return null;
|
4319
|
-
}
|
4320
|
-
patchedLines[patchedLinesLength] = content;
|
4321
|
-
patchedLinesLength++;
|
4322
|
-
nConsecutiveOldContextLines = 0;
|
4323
|
-
nextContextLineMustMatch = true;
|
4324
|
-
}
|
4325
|
-
if (operation === ' ') {
|
4326
|
-
nConsecutiveOldContextLines++;
|
4327
|
-
patchedLines[patchedLinesLength] = lines[toPos];
|
4328
|
-
if (compareLine(toPos + 1, lines[toPos], operation, content)) {
|
4329
|
-
patchedLinesLength++;
|
4330
|
-
lastContextLineMatched = true;
|
4331
|
-
nextContextLineMustMatch = false;
|
4332
|
-
toPos++;
|
4333
|
-
} else {
|
4334
|
-
if (nextContextLineMustMatch || !maxErrors) {
|
4335
|
-
return null;
|
4336
|
-
}
|
4337
3834
|
|
4338
|
-
|
4339
|
-
// 1. lines contains a *substitution* not included in the patch context, or
|
4340
|
-
// 2. lines contains an *insertion* not included in the patch context, or
|
4341
|
-
// 3. lines contains a *deletion* not included in the patch context
|
4342
|
-
// The first two options are of course only possible if the line from lines is non-null -
|
4343
|
-
// i.e. only option 3 is possible if we've overrun the end of the old file.
|
4344
|
-
return lines[toPos] && (applyHunk(hunkLines, toPos + 1, maxErrors - 1, hunkLinesI + 1, false, patchedLines, patchedLinesLength + 1) || applyHunk(hunkLines, toPos + 1, maxErrors - 1, hunkLinesI, false, patchedLines, patchedLinesLength + 1)) || applyHunk(hunkLines, toPos, maxErrors - 1, hunkLinesI + 1, false, patchedLines, patchedLinesLength);
|
4345
|
-
}
|
3835
|
+
toPos++;
|
4346
3836
|
}
|
4347
3837
|
}
|
4348
3838
|
|
4349
|
-
|
4350
|
-
|
4351
|
-
// that starts in this hunk's trailing context.
|
4352
|
-
patchedLinesLength -= nConsecutiveOldContextLines;
|
4353
|
-
toPos -= nConsecutiveOldContextLines;
|
4354
|
-
patchedLines.length = patchedLinesLength;
|
4355
|
-
return {
|
4356
|
-
patchedLines: patchedLines,
|
4357
|
-
oldLineLastI: toPos - 1
|
4358
|
-
};
|
4359
|
-
}
|
4360
|
-
var resultLines = [];
|
3839
|
+
return true;
|
3840
|
+
} // Search best fit offsets for each hunk based on the previous ones
|
4361
3841
|
|
4362
|
-
|
4363
|
-
var
|
4364
|
-
|
4365
|
-
|
4366
|
-
|
4367
|
-
|
4368
|
-
var
|
4369
|
-
|
4370
|
-
|
4371
|
-
|
4372
|
-
|
4373
|
-
hunkResult = applyHunk(hunk.lines, toPos, maxErrors);
|
4374
|
-
if (hunkResult) {
|
4375
|
-
break;
|
4376
|
-
}
|
4377
|
-
}
|
4378
|
-
if (hunkResult) {
|
3842
|
+
|
3843
|
+
for (var i = 0; i < hunks.length; i++) {
|
3844
|
+
var hunk = hunks[i],
|
3845
|
+
maxLine = lines.length - hunk.oldLines,
|
3846
|
+
localOffset = 0,
|
3847
|
+
toPos = offset + hunk.oldStart - 1;
|
3848
|
+
var iterator = lib_distanceIterator(toPos, minLine, maxLine);
|
3849
|
+
|
3850
|
+
for (; localOffset !== undefined; localOffset = iterator()) {
|
3851
|
+
if (hunkFits(hunk, toPos + localOffset)) {
|
3852
|
+
hunk.offset = offset += localOffset;
|
4379
3853
|
break;
|
4380
3854
|
}
|
4381
3855
|
}
|
4382
|
-
|
3856
|
+
|
3857
|
+
if (localOffset === undefined) {
|
4383
3858
|
return false;
|
4384
|
-
}
|
3859
|
+
} // Set lower text limit to end of the current hunk, so next ones don't try
|
3860
|
+
// to fit over already patched text
|
4385
3861
|
|
4386
|
-
// Copy everything from the end of where we applied the last hunk to the start of this hunk
|
4387
|
-
for (var _i2 = minLine; _i2 < toPos; _i2++) {
|
4388
|
-
resultLines.push(lines[_i2]);
|
4389
|
-
}
|
4390
3862
|
|
4391
|
-
|
4392
|
-
|
4393
|
-
|
4394
|
-
|
3863
|
+
minLine = hunk.offset + hunk.oldStart + hunk.oldLines;
|
3864
|
+
} // Apply patch hunks
|
3865
|
+
|
3866
|
+
|
3867
|
+
var diffOffset = 0;
|
3868
|
+
|
3869
|
+
for (var _i = 0; _i < hunks.length; _i++) {
|
3870
|
+
var _hunk = hunks[_i],
|
3871
|
+
_toPos = _hunk.oldStart + _hunk.offset + diffOffset - 1;
|
3872
|
+
|
3873
|
+
diffOffset += _hunk.newLines - _hunk.oldLines;
|
3874
|
+
|
3875
|
+
for (var j = 0; j < _hunk.lines.length; j++) {
|
3876
|
+
var line = _hunk.lines[j],
|
3877
|
+
operation = line.length > 0 ? line[0] : ' ',
|
3878
|
+
content = line.length > 0 ? line.substr(1) : line,
|
3879
|
+
delimiter = _hunk.linedelimiters[j];
|
3880
|
+
|
3881
|
+
if (operation === ' ') {
|
3882
|
+
_toPos++;
|
3883
|
+
} else if (operation === '-') {
|
3884
|
+
lines.splice(_toPos, 1);
|
3885
|
+
delimiters.splice(_toPos, 1);
|
3886
|
+
/* istanbul ignore else */
|
3887
|
+
} else if (operation === '+') {
|
3888
|
+
lines.splice(_toPos, 0, content);
|
3889
|
+
delimiters.splice(_toPos, 0, delimiter);
|
3890
|
+
_toPos++;
|
3891
|
+
} else if (operation === '\\') {
|
3892
|
+
var previousOperation = _hunk.lines[j - 1] ? _hunk.lines[j - 1][0] : null;
|
3893
|
+
|
3894
|
+
if (previousOperation === '+') {
|
3895
|
+
removeEOFNL = true;
|
3896
|
+
} else if (previousOperation === '-') {
|
3897
|
+
addEOFNL = true;
|
3898
|
+
}
|
3899
|
+
}
|
4395
3900
|
}
|
3901
|
+
} // Handle EOFNL insertion/removal
|
4396
3902
|
|
4397
|
-
// Set lower text limit to end of the current hunk, so next ones don't try
|
4398
|
-
// to fit over already patched text
|
4399
|
-
minLine = hunkResult.oldLineLastI + 1;
|
4400
3903
|
|
4401
|
-
|
4402
|
-
|
4403
|
-
|
3904
|
+
if (removeEOFNL) {
|
3905
|
+
while (!lines[lines.length - 1]) {
|
3906
|
+
lines.pop();
|
3907
|
+
delimiters.pop();
|
3908
|
+
}
|
3909
|
+
} else if (addEOFNL) {
|
3910
|
+
lines.push('');
|
3911
|
+
delimiters.push('\n');
|
4404
3912
|
}
|
4405
3913
|
|
4406
|
-
|
4407
|
-
|
4408
|
-
resultLines.push(lines[_i4]);
|
3914
|
+
for (var _k = 0; _k < lines.length - 1; _k++) {
|
3915
|
+
lines[_k] = lines[_k] + delimiters[_k];
|
4409
3916
|
}
|
4410
|
-
return resultLines.join('\n');
|
4411
|
-
}
|
4412
3917
|
|
4413
|
-
|
3918
|
+
return lines.join('');
|
3919
|
+
} // Wrapper that supports multiple file patches via callbacks.
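To summarize the control flow of the restored lib_applyPatch above: it parses string patches with lib_parsePatch, uses lib_distanceIterator and hunkFits to find an offset where each hunk's context matches (tolerating up to options.fuzzFactor mismatches), splices removals and additions into the source lines, fixes up the end-of-file newline, and returns the patched string, or false when a hunk cannot be placed. A minimal usage sketch (inputs invented, patchText as in the earlier sketch):

```js
// Illustrative only.
var source = 'first line\nold second line\n';
var result = lib_applyPatch(source, patchText);
// => 'first line\nnew second line\n' when the context matches, false otherwise.

// fuzzFactor lets that many context/removed lines differ before giving up:
lib_applyPatch('first line\nslightly different\n', patchText, { fuzzFactor: 1 });
```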
|
3920
|
+
|
4414
3921
|
function lib_applyPatches(uniDiff, options) {
|
4415
3922
|
if (typeof uniDiff === 'string') {
|
4416
3923
|
uniDiff = lib_parsePatch(uniDiff);
|
4417
3924
|
}
|
3925
|
+
|
4418
3926
|
var currentIndex = 0;
|
3927
|
+
|
4419
3928
|
function processIndex() {
|
4420
3929
|
var index = uniDiff[currentIndex++];
|
3930
|
+
|
4421
3931
|
if (!index) {
|
4422
3932
|
return options.complete();
|
4423
3933
|
}
|
3934
|
+
|
4424
3935
|
options.loadFile(index, function (err, data) {
|
4425
3936
|
if (err) {
|
4426
3937
|
return options.complete(err);
|
4427
3938
|
}
|
3939
|
+
|
4428
3940
|
var updatedContent = lib_applyPatch(data, index, options);
|
4429
3941
|
options.patched(index, updatedContent, function (err) {
|
4430
3942
|
if (err) {
|
4431
3943
|
return options.complete(err);
|
4432
3944
|
}
|
3945
|
+
|
4433
3946
|
processIndex();
|
4434
3947
|
});
|
4435
3948
|
});
|
4436
3949
|
}
|
3950
|
+
|
4437
3951
|
processIndex();
|
4438
3952
|
}
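lib_applyPatches above is the multi-file wrapper: it walks each parsed file entry and defers all I/O to three callbacks on options. loadFile(index, cb) must supply the current content, patched(index, updatedContent, cb) receives each result, and complete(err) fires after the last file or on the first error. A minimal sketch with an invented in-memory file map:

```js
// Illustrative only; `files` and `multiFilePatch` are made up for the example.
var files = { 'a.txt': 'first line\nold second line\n' };

lib_applyPatches(multiFilePatch, {
  loadFile: function (index, callback) {
    callback(null, files[index.oldFileName]);
  },
  patched: function (index, updatedContent, callback) {
    files[index.newFileName] = updatedContent;
    callback(null);
  },
  complete: function (err) {
    if (err) throw err;
  }
});
```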
|
4439
3953
|
|
@@ -4441,238 +3955,202 @@ function lib_structuredPatch(oldFileName, newFileName, oldStr, newStr, oldHeader
|
|
4441
3955
|
if (!options) {
|
4442
3956
|
options = {};
|
4443
3957
|
}
|
4444
|
-
|
4445
|
-
options = {
|
4446
|
-
callback: options
|
4447
|
-
};
|
4448
|
-
}
|
3958
|
+
|
4449
3959
|
if (typeof options.context === 'undefined') {
|
4450
3960
|
options.context = 4;
|
4451
3961
|
}
|
4452
|
-
|
4453
|
-
|
4454
|
-
|
4455
|
-
if (!
|
4456
|
-
return
|
4457
|
-
} else {
|
4458
|
-
var _options = options,
|
4459
|
-
_callback = _options.callback;
|
4460
|
-
lib_diffLines(oldStr, newStr, _objectSpread2(_objectSpread2({}, options), {}, {
|
4461
|
-
callback: function callback(diff) {
|
4462
|
-
var patch = diffLinesResultToPatch(diff);
|
4463
|
-
_callback(patch);
|
4464
|
-
}
|
4465
|
-
}));
|
3962
|
+
|
3963
|
+
var diff = lib_diffLines(oldStr, newStr, options);
|
3964
|
+
|
3965
|
+
if (!diff) {
|
3966
|
+
return;
|
4466
3967
|
}
|
4467
|
-
function diffLinesResultToPatch(diff) {
|
4468
|
-
// STEP 1: Build up the patch with no "" lines and with the arrays
|
4469
|
-
// of lines containing trailing newline characters. We'll tidy up later...
|
4470
3968
|
|
4471
|
-
|
4472
|
-
|
4473
|
-
|
4474
|
-
|
4475
|
-
value: '',
|
4476
|
-
lines: []
|
4477
|
-
}); // Append an empty value to make cleanup easier
|
3969
|
+
diff.push({
|
3970
|
+
value: '',
|
3971
|
+
lines: []
|
3972
|
+
}); // Append an empty value to make cleanup easier
|
4478
3973
|
|
4479
|
-
|
4480
|
-
|
4481
|
-
|
4482
|
-
|
4483
|
-
|
4484
|
-
|
4485
|
-
|
3974
|
+
function contextLines(lines) {
|
3975
|
+
return lines.map(function (entry) {
|
3976
|
+
return ' ' + entry;
|
3977
|
+
});
|
3978
|
+
}
|
3979
|
+
|
3980
|
+
var hunks = [];
|
3981
|
+
var oldRangeStart = 0,
|
4486
3982
|
newRangeStart = 0,
|
4487
3983
|
curRange = [],
|
4488
3984
|
oldLine = 1,
|
4489
3985
|
newLine = 1;
|
4490
|
-
var _loop = function _loop() {
|
4491
|
-
var current = diff[i],
|
4492
|
-
lines = current.lines || splitLines(current.value);
|
4493
|
-
current.lines = lines;
|
4494
|
-
if (current.added || current.removed) {
|
4495
|
-
var _curRange;
|
4496
|
-
// If we have previous context, start with that
|
4497
|
-
if (!oldRangeStart) {
|
4498
|
-
var prev = diff[i - 1];
|
4499
|
-
oldRangeStart = oldLine;
|
4500
|
-
newRangeStart = newLine;
|
4501
|
-
if (prev) {
|
4502
|
-
curRange = options.context > 0 ? contextLines(prev.lines.slice(-options.context)) : [];
|
4503
|
-
oldRangeStart -= curRange.length;
|
4504
|
-
newRangeStart -= curRange.length;
|
4505
|
-
}
|
4506
|
-
}
|
4507
3986
|
|
4508
|
-
|
4509
|
-
|
4510
|
-
|
4511
|
-
|
3987
|
+
var _loop = function _loop(i) {
|
3988
|
+
var current = diff[i],
|
3989
|
+
lines = current.lines || current.value.replace(/\n$/, '').split('\n');
|
3990
|
+
current.lines = lines;
|
4512
3991
|
|
4513
|
-
|
4514
|
-
|
4515
|
-
|
4516
|
-
|
4517
|
-
|
3992
|
+
if (current.added || current.removed) {
|
3993
|
+
var _curRange;
|
3994
|
+
|
3995
|
+
// If we have previous context, start with that
|
3996
|
+
if (!oldRangeStart) {
|
3997
|
+
var prev = diff[i - 1];
|
3998
|
+
oldRangeStart = oldLine;
|
3999
|
+
newRangeStart = newLine;
|
4000
|
+
|
4001
|
+
if (prev) {
|
4002
|
+
curRange = options.context > 0 ? contextLines(prev.lines.slice(-options.context)) : [];
|
4003
|
+
oldRangeStart -= curRange.length;
|
4004
|
+
newRangeStart -= curRange.length;
|
4518
4005
|
}
|
4006
|
+
} // Output our changes
|
4007
|
+
|
4008
|
+
|
4009
|
+
(_curRange = curRange).push.apply(_curRange, lib_toConsumableArray(lines.map(function (entry) {
|
4010
|
+
return (current.added ? '+' : '-') + entry;
|
4011
|
+
}))); // Track the updated file position
|
4012
|
+
|
4013
|
+
|
4014
|
+
if (current.added) {
|
4015
|
+
newLine += lines.length;
|
4519
4016
|
} else {
|
4520
|
-
// Identical context lines. Track line changes
|
4521
|
-
if (oldRangeStart) {
|
4522
|
-
// Close out any changes that have been output (or join overlapping)
|
4523
|
-
if (lines.length <= options.context * 2 && i < diff.length - 2) {
|
4524
|
-
var _curRange2;
|
4525
|
-
// Overlapping
|
4526
|
-
(_curRange2 = curRange).push.apply(_curRange2, lib_toConsumableArray(contextLines(lines)));
|
4527
|
-
} else {
|
4528
|
-
var _curRange3;
|
4529
|
-
// end the range and output
|
4530
|
-
var contextSize = Math.min(lines.length, options.context);
|
4531
|
-
(_curRange3 = curRange).push.apply(_curRange3, lib_toConsumableArray(contextLines(lines.slice(0, contextSize))));
|
4532
|
-
var _hunk = {
|
4533
|
-
oldStart: oldRangeStart,
|
4534
|
-
oldLines: oldLine - oldRangeStart + contextSize,
|
4535
|
-
newStart: newRangeStart,
|
4536
|
-
newLines: newLine - newRangeStart + contextSize,
|
4537
|
-
lines: curRange
|
4538
|
-
};
|
4539
|
-
hunks.push(_hunk);
|
4540
|
-
oldRangeStart = 0;
|
4541
|
-
newRangeStart = 0;
|
4542
|
-
curRange = [];
|
4543
|
-
}
|
4544
|
-
}
|
4545
4017
|
oldLine += lines.length;
|
4546
|
-
newLine += lines.length;
|
4547
4018
|
}
|
4548
|
-
}
|
4549
|
-
|
4550
|
-
|
4551
|
-
|
4019
|
+
} else {
|
4020
|
+
// Identical context lines. Track line changes
|
4021
|
+
if (oldRangeStart) {
|
4022
|
+
// Close out any changes that have been output (or join overlapping)
|
4023
|
+
if (lines.length <= options.context * 2 && i < diff.length - 2) {
|
4024
|
+
var _curRange2;
|
4552
4025
|
|
4553
|
-
|
4554
|
-
|
4555
|
-
for (var _i = 0, _hunks = hunks; _i < _hunks.length; _i++) {
|
4556
|
-
var hunk = _hunks[_i];
|
4557
|
-
for (var _i2 = 0; _i2 < hunk.lines.length; _i2++) {
|
4558
|
-
if (hunk.lines[_i2].endsWith('\n')) {
|
4559
|
-
hunk.lines[_i2] = hunk.lines[_i2].slice(0, -1);
|
4026
|
+
// Overlapping
|
4027
|
+
(_curRange2 = curRange).push.apply(_curRange2, lib_toConsumableArray(contextLines(lines)));
|
4560
4028
|
} else {
|
4561
|
-
|
4562
|
-
|
4029
|
+
var _curRange3;
|
4030
|
+
|
4031
|
+
// end the range and output
|
4032
|
+
var contextSize = Math.min(lines.length, options.context);
|
4033
|
+
|
4034
|
+
(_curRange3 = curRange).push.apply(_curRange3, lib_toConsumableArray(contextLines(lines.slice(0, contextSize))));
|
4035
|
+
|
4036
|
+
var hunk = {
|
4037
|
+
oldStart: oldRangeStart,
|
4038
|
+
oldLines: oldLine - oldRangeStart + contextSize,
|
4039
|
+
newStart: newRangeStart,
|
4040
|
+
newLines: newLine - newRangeStart + contextSize,
|
4041
|
+
lines: curRange
|
4042
|
+
};
|
4043
|
+
|
4044
|
+
if (i >= diff.length - 2 && lines.length <= options.context) {
|
4045
|
+
// EOF is inside this hunk
|
4046
|
+
var oldEOFNewline = /\n$/.test(oldStr);
|
4047
|
+
var newEOFNewline = /\n$/.test(newStr);
|
4048
|
+
var noNlBeforeAdds = lines.length == 0 && curRange.length > hunk.oldLines;
|
4049
|
+
|
4050
|
+
if (!oldEOFNewline && noNlBeforeAdds && oldStr.length > 0) {
|
4051
|
+
// special case: old has no eol and no trailing context; no-nl can end up before adds
|
4052
|
+
// however, if the old file is empty, do not output the no-nl line
|
4053
|
+
curRange.splice(hunk.oldLines, 0, '\');
|
4054
|
+
}
|
4055
|
+
|
4056
|
+
if (!oldEOFNewline && !noNlBeforeAdds || !newEOFNewline) {
|
4057
|
+
curRange.push('\');
|
4058
|
+
}
|
4059
|
+
}
|
4060
|
+
|
4061
|
+
hunks.push(hunk);
|
4062
|
+
oldRangeStart = 0;
|
4063
|
+
newRangeStart = 0;
|
4064
|
+
curRange = [];
|
4563
4065
|
}
|
4564
4066
|
}
|
4067
|
+
|
4068
|
+
oldLine += lines.length;
|
4069
|
+
newLine += lines.length;
|
4565
4070
|
}
|
4566
|
-
|
4567
|
-
|
4568
|
-
|
4569
|
-
|
4570
|
-
newHeader: newHeader,
|
4571
|
-
hunks: hunks
|
4572
|
-
};
|
4071
|
+
};
|
4072
|
+
|
4073
|
+
for (var i = 0; i < diff.length; i++) {
|
4074
|
+
_loop(i);
|
4573
4075
|
}
|
4076
|
+
|
4077
|
+
return {
|
4078
|
+
oldFileName: oldFileName,
|
4079
|
+
newFileName: newFileName,
|
4080
|
+
oldHeader: oldHeader,
|
4081
|
+
newHeader: newHeader,
|
4082
|
+
hunks: hunks
|
4083
|
+
};
|
4574
4084
|
}
|
4575
4085
|
function lib_formatPatch(diff) {
|
4576
|
-
if (Array.isArray(diff)) {
|
4577
|
-
return diff.map(lib_formatPatch).join('\n');
|
4578
|
-
}
|
4579
4086
|
var ret = [];
|
4087
|
+
|
4580
4088
|
if (diff.oldFileName == diff.newFileName) {
|
4581
4089
|
ret.push('Index: ' + diff.oldFileName);
|
4582
4090
|
}
|
4091
|
+
|
4583
4092
|
ret.push('===================================================================');
|
4584
4093
|
ret.push('--- ' + diff.oldFileName + (typeof diff.oldHeader === 'undefined' ? '' : '\t' + diff.oldHeader));
|
4585
4094
|
ret.push('+++ ' + diff.newFileName + (typeof diff.newHeader === 'undefined' ? '' : '\t' + diff.newHeader));
|
4095
|
+
|
4586
4096
|
for (var i = 0; i < diff.hunks.length; i++) {
|
4587
|
-
var hunk = diff.hunks[i];
|
4588
|
-
// Unified Diff Format quirk: If the chunk size is 0,
|
4097
|
+
var hunk = diff.hunks[i]; // Unified Diff Format quirk: If the chunk size is 0,
|
4589
4098
|
// the first number is one lower than one would expect.
|
4590
4099
|
// https://www.artima.com/weblogs/viewpost.jsp?thread=164293
|
4100
|
+
|
4591
4101
|
if (hunk.oldLines === 0) {
|
4592
4102
|
hunk.oldStart -= 1;
|
4593
4103
|
}
|
4104
|
+
|
4594
4105
|
if (hunk.newLines === 0) {
|
4595
4106
|
hunk.newStart -= 1;
|
4596
4107
|
}
|
4108
|
+
|
4597
4109
|
ret.push('@@ -' + hunk.oldStart + ',' + hunk.oldLines + ' +' + hunk.newStart + ',' + hunk.newLines + ' @@');
|
4598
4110
|
ret.push.apply(ret, hunk.lines);
|
4599
4111
|
}
|
4112
|
+
|
4600
4113
|
return ret.join('\n') + '\n';
|
4601
4114
|
}
|
4602
4115
|
function lib_createTwoFilesPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options) {
|
4603
|
-
|
4604
|
-
if (typeof options === 'function') {
|
4605
|
-
options = {
|
4606
|
-
callback: options
|
4607
|
-
};
|
4608
|
-
}
|
4609
|
-
if (!((_options2 = options) !== null && _options2 !== void 0 && _options2.callback)) {
|
4610
|
-
var patchObj = lib_structuredPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options);
|
4611
|
-
if (!patchObj) {
|
4612
|
-
return;
|
4613
|
-
}
|
4614
|
-
return lib_formatPatch(patchObj);
|
4615
|
-
} else {
|
4616
|
-
var _options3 = options,
|
4617
|
-
_callback2 = _options3.callback;
|
4618
|
-
lib_structuredPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, _objectSpread2(_objectSpread2({}, options), {}, {
|
4619
|
-
callback: function callback(patchObj) {
|
4620
|
-
if (!patchObj) {
|
4621
|
-
_callback2();
|
4622
|
-
} else {
|
4623
|
-
_callback2(lib_formatPatch(patchObj));
|
4624
|
-
}
|
4625
|
-
}
|
4626
|
-
}));
|
4627
|
-
}
|
4116
|
+
return lib_formatPatch(lib_structuredPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options));
|
4628
4117
|
}
|
4629
4118
|
function lib_createPatch(fileName, oldStr, newStr, oldHeader, newHeader, options) {
|
4630
4119
|
return lib_createTwoFilesPatch(fileName, fileName, oldStr, newStr, oldHeader, newHeader, options);
|
4631
4120
|
}
|
4632
4121
|
|
4633
|
-
/**
|
4634
|
-
* Split `text` into an array of lines, including the trailing newline character (where present)
|
4635
|
-
*/
|
4636
|
-
function splitLines(text) {
|
4637
|
-
var hasTrailingNl = text.endsWith('\n');
|
4638
|
-
var result = text.split('\n').map(function (line) {
|
4639
|
-
return line + '\n';
|
4640
|
-
});
|
4641
|
-
if (hasTrailingNl) {
|
4642
|
-
result.pop();
|
4643
|
-
} else {
|
4644
|
-
result.push(result.pop().slice(0, -1));
|
4645
|
-
}
|
4646
|
-
return result;
|
4647
|
-
}
|
4648
|
-
|
4649
4122
|
function lib_arrayEqual(a, b) {
|
4650
4123
|
if (a.length !== b.length) {
|
4651
4124
|
return false;
|
4652
4125
|
}
|
4126
|
+
|
4653
4127
|
return lib_arrayStartsWith(a, b);
|
4654
4128
|
}
|
4655
4129
|
function lib_arrayStartsWith(array, start) {
|
4656
4130
|
if (start.length > array.length) {
|
4657
4131
|
return false;
|
4658
4132
|
}
|
4133
|
+
|
4659
4134
|
for (var i = 0; i < start.length; i++) {
|
4660
4135
|
if (start[i] !== array[i]) {
|
4661
4136
|
return false;
|
4662
4137
|
}
|
4663
4138
|
}
|
4139
|
+
|
4664
4140
|
return true;
|
4665
4141
|
}
|
4666
4142
|
|
4667
4143
|
function lib_calcLineCount(hunk) {
|
4668
4144
|
var _calcOldNewLineCount = lib_calcOldNewLineCount(hunk.lines),
|
4669
|
-
|
4670
|
-
|
4145
|
+
oldLines = _calcOldNewLineCount.oldLines,
|
4146
|
+
newLines = _calcOldNewLineCount.newLines;
|
4147
|
+
|
4671
4148
|
if (oldLines !== undefined) {
|
4672
4149
|
hunk.oldLines = oldLines;
|
4673
4150
|
} else {
|
4674
4151
|
delete hunk.oldLines;
|
4675
4152
|
}
|
4153
|
+
|
4676
4154
|
if (newLines !== undefined) {
|
4677
4155
|
hunk.newLines = newLines;
|
4678
4156
|
} else {
|
@@ -4682,14 +4160,14 @@ function lib_calcLineCount(hunk) {
|
|
4682
4160
|
function lib_merge(mine, theirs, base) {
|
4683
4161
|
mine = lib_loadPatch(mine, base);
|
4684
4162
|
theirs = lib_loadPatch(theirs, base);
|
4685
|
-
var ret = {};
|
4686
|
-
|
4687
|
-
// For index we just let it pass through as it doesn't have any necessary meaning.
|
4163
|
+
var ret = {}; // For index we just let it pass through as it doesn't have any necessary meaning.
|
4688
4164
|
// Leaving sanity checks on this to the API consumer that may know more about the
|
4689
4165
|
// meaning in their own context.
|
4166
|
+
|
4690
4167
|
if (mine.index || theirs.index) {
|
4691
4168
|
ret.index = mine.index || theirs.index;
|
4692
4169
|
}
|
4170
|
+
|
4693
4171
|
if (mine.newFileName || theirs.newFileName) {
|
4694
4172
|
if (!lib_fileNameChanged(mine)) {
|
4695
4173
|
// No header or no change in ours, use theirs (and ours if theirs does not exist)
|
@@ -4711,18 +4189,21 @@ function lib_merge(mine, theirs, base) {
|
|
4711
4189
|
ret.newHeader = lib_selectField(ret, mine.newHeader, theirs.newHeader);
|
4712
4190
|
}
|
4713
4191
|
}
|
4192
|
+
|
4714
4193
|
ret.hunks = [];
|
4715
4194
|
var mineIndex = 0,
|
4716
|
-
|
4717
|
-
|
4718
|
-
|
4195
|
+
theirsIndex = 0,
|
4196
|
+
mineOffset = 0,
|
4197
|
+
theirsOffset = 0;
|
4198
|
+
|
4719
4199
|
while (mineIndex < mine.hunks.length || theirsIndex < theirs.hunks.length) {
|
4720
4200
|
var mineCurrent = mine.hunks[mineIndex] || {
|
4721
|
-
|
4722
|
-
|
4723
|
-
|
4724
|
-
|
4725
|
-
|
4201
|
+
oldStart: Infinity
|
4202
|
+
},
|
4203
|
+
theirsCurrent = theirs.hunks[theirsIndex] || {
|
4204
|
+
oldStart: Infinity
|
4205
|
+
};
|
4206
|
+
|
4726
4207
|
if (lib_hunkBefore(mineCurrent, theirsCurrent)) {
|
4727
4208
|
// This patch does not overlap with any of the others, yay.
|
4728
4209
|
ret.hunks.push(lib_cloneHunk(mineCurrent, mineOffset));
|
@@ -4748,23 +4229,30 @@ function lib_merge(mine, theirs, base) {
|
|
4748
4229
|
ret.hunks.push(mergedHunk);
|
4749
4230
|
}
|
4750
4231
|
}
|
4232
|
+
|
4751
4233
|
return ret;
|
4752
4234
|
}
|
4235
|
+
|
4753
4236
|
function lib_loadPatch(param, base) {
|
4754
4237
|
if (typeof param === 'string') {
|
4755
4238
|
if (/^@@/m.test(param) || /^Index:/m.test(param)) {
|
4756
4239
|
return lib_parsePatch(param)[0];
|
4757
4240
|
}
|
4241
|
+
|
4758
4242
|
if (!base) {
|
4759
4243
|
throw new Error('Must provide a base reference or pass in a patch');
|
4760
4244
|
}
|
4245
|
+
|
4761
4246
|
return lib_structuredPatch(undefined, undefined, base, param);
|
4762
4247
|
}
|
4248
|
+
|
4763
4249
|
return param;
|
4764
4250
|
}
|
4251
|
+
|
4765
4252
|
function lib_fileNameChanged(patch) {
|
4766
4253
|
return patch.newFileName && patch.newFileName !== patch.oldFileName;
|
4767
4254
|
}
|
4255
|
+
|
4768
4256
|
function lib_selectField(index, mine, theirs) {
|
4769
4257
|
if (mine === theirs) {
|
4770
4258
|
return mine;
|
@@ -4776,9 +4264,11 @@ function lib_selectField(index, mine, theirs) {
|
|
4776
4264
|
};
|
4777
4265
|
}
|
4778
4266
|
}
|
4267
|
+
|
4779
4268
|
function lib_hunkBefore(test, check) {
|
4780
4269
|
return test.oldStart < check.oldStart && test.oldStart + test.oldLines < check.oldStart;
|
4781
4270
|
}
|
4271
|
+
|
4782
4272
|
function lib_cloneHunk(hunk, offset) {
|
4783
4273
|
return {
|
4784
4274
|
oldStart: hunk.oldStart,
|
@@ -4788,37 +4278,39 @@ function lib_cloneHunk(hunk, offset) {
|
|
4788
4278
|
lines: hunk.lines
|
4789
4279
|
};
|
4790
4280
|
}
|
4281
|
+
|
4791
4282
|
function lib_mergeLines(hunk, mineOffset, mineLines, theirOffset, theirLines) {
|
4792
4283
|
// This will generally result in a conflicted hunk, but there are cases where the context
|
4793
4284
|
// is the only overlap where we can successfully merge the content here.
|
4794
4285
|
var mine = {
|
4795
|
-
|
4796
|
-
|
4797
|
-
|
4798
|
-
|
4799
|
-
|
4800
|
-
|
4801
|
-
|
4802
|
-
|
4803
|
-
|
4286
|
+
offset: mineOffset,
|
4287
|
+
lines: mineLines,
|
4288
|
+
index: 0
|
4289
|
+
},
|
4290
|
+
their = {
|
4291
|
+
offset: theirOffset,
|
4292
|
+
lines: theirLines,
|
4293
|
+
index: 0
|
4294
|
+
}; // Handle any leading content
|
4804
4295
|
|
4805
|
-
// Handle any leading content
|
4806
4296
|
lib_insertLeading(hunk, mine, their);
|
4807
|
-
lib_insertLeading(hunk, their, mine);
|
4297
|
+
lib_insertLeading(hunk, their, mine); // Now in the overlap content. Scan through and select the best changes from each.
|
4808
4298
|
|
4809
|
-
// Now in the overlap content. Scan through and select the best changes from each.
|
4810
4299
|
while (mine.index < mine.lines.length && their.index < their.lines.length) {
|
4811
4300
|
var mineCurrent = mine.lines[mine.index],
|
4812
|
-
|
4301
|
+
theirCurrent = their.lines[their.index];
|
4302
|
+
|
4813
4303
|
if ((mineCurrent[0] === '-' || mineCurrent[0] === '+') && (theirCurrent[0] === '-' || theirCurrent[0] === '+')) {
|
4814
4304
|
// Both modified ...
|
4815
4305
|
lib_mutualChange(hunk, mine, their);
|
4816
4306
|
} else if (mineCurrent[0] === '+' && theirCurrent[0] === ' ') {
|
4817
4307
|
var _hunk$lines;
|
4308
|
+
|
4818
4309
|
// Mine inserted
|
4819
4310
|
(_hunk$lines = hunk.lines).push.apply(_hunk$lines, lib_toConsumableArray(lib_collectChange(mine)));
|
4820
4311
|
} else if (theirCurrent[0] === '+' && mineCurrent[0] === ' ') {
|
4821
4312
|
var _hunk$lines2;
|
4313
|
+
|
4822
4314
|
// Theirs inserted
|
4823
4315
|
(_hunk$lines2 = hunk.lines).push.apply(_hunk$lines2, lib_toConsumableArray(lib_collectChange(their)));
|
4824
4316
|
} else if (mineCurrent[0] === '-' && theirCurrent[0] === ' ') {
|
@@ -4836,44 +4328,57 @@ function lib_mergeLines(hunk, mineOffset, mineLines, theirOffset, theirLines) {
|
|
4836
4328
|
// Context mismatch
|
4837
4329
|
lib_conflict(hunk, lib_collectChange(mine), lib_collectChange(their));
|
4838
4330
|
}
|
4839
|
-
}
|
4331
|
+
} // Now push anything that may be remaining
|
4332
|
+
|
4840
4333
|
|
4841
|
-
// Now push anything that may be remaining
|
4842
4334
|
lib_insertTrailing(hunk, mine);
|
4843
4335
|
lib_insertTrailing(hunk, their);
|
4844
4336
|
lib_calcLineCount(hunk);
|
4845
4337
|
}
|
4338
|
+
|
4846
4339
|
function lib_mutualChange(hunk, mine, their) {
|
4847
4340
|
var myChanges = lib_collectChange(mine),
|
4848
|
-
|
4341
|
+
theirChanges = lib_collectChange(their);
|
4342
|
+
|
4849
4343
|
if (lib_allRemoves(myChanges) && lib_allRemoves(theirChanges)) {
|
4850
4344
|
// Special case for remove changes that are supersets of one another
|
4851
4345
|
if (lib_arrayStartsWith(myChanges, theirChanges) && lib_skipRemoveSuperset(their, myChanges, myChanges.length - theirChanges.length)) {
|
4852
4346
|
var _hunk$lines3;
|
4347
|
+
|
4853
4348
|
(_hunk$lines3 = hunk.lines).push.apply(_hunk$lines3, lib_toConsumableArray(myChanges));
|
4349
|
+
|
4854
4350
|
return;
|
4855
4351
|
} else if (lib_arrayStartsWith(theirChanges, myChanges) && lib_skipRemoveSuperset(mine, theirChanges, theirChanges.length - myChanges.length)) {
|
4856
4352
|
var _hunk$lines4;
|
4353
|
+
|
4857
4354
|
(_hunk$lines4 = hunk.lines).push.apply(_hunk$lines4, lib_toConsumableArray(theirChanges));
|
4355
|
+
|
4858
4356
|
return;
|
4859
4357
|
}
|
4860
4358
|
} else if (lib_arrayEqual(myChanges, theirChanges)) {
|
4861
4359
|
var _hunk$lines5;
|
4360
|
+
|
4862
4361
|
(_hunk$lines5 = hunk.lines).push.apply(_hunk$lines5, lib_toConsumableArray(myChanges));
|
4362
|
+
|
4863
4363
|
return;
|
4864
4364
|
}
|
4365
|
+
|
4865
4366
|
lib_conflict(hunk, myChanges, theirChanges);
|
4866
4367
|
}
|
4368
|
+
|
4867
4369
|
function lib_removal(hunk, mine, their, swap) {
|
4868
4370
|
var myChanges = lib_collectChange(mine),
|
4869
|
-
|
4371
|
+
theirChanges = lib_collectContext(their, myChanges);
|
4372
|
+
|
4870
4373
|
if (theirChanges.merged) {
|
4871
4374
|
var _hunk$lines6;
|
4375
|
+
|
4872
4376
|
(_hunk$lines6 = hunk.lines).push.apply(_hunk$lines6, lib_toConsumableArray(theirChanges.merged));
|
4873
4377
|
} else {
|
4874
4378
|
lib_conflict(hunk, swap ? theirChanges : myChanges, swap ? myChanges : theirChanges);
|
4875
4379
|
}
|
4876
4380
|
}
|
4381
|
+
|
4877
4382
|
function lib_conflict(hunk, mine, their) {
|
4878
4383
|
hunk.conflict = true;
|
4879
4384
|
hunk.lines.push({
|
@@ -4882,6 +4387,7 @@ function lib_conflict(hunk, mine, their) {
|
|
4882
4387
|
theirs: their
|
4883
4388
|
});
|
4884
4389
|
}
|
4390
|
+
|
4885
4391
|
function lib_insertLeading(hunk, insert, their) {
|
4886
4392
|
while (insert.offset < their.offset && insert.index < insert.lines.length) {
|
4887
4393
|
var line = insert.lines[insert.index++];
|
@@ -4889,22 +4395,25 @@ function lib_insertLeading(hunk, insert, their) {
|
|
4889
4395
|
insert.offset++;
|
4890
4396
|
}
|
4891
4397
|
}
|
4398
|
+
|
4892
4399
|
function lib_insertTrailing(hunk, insert) {
|
4893
4400
|
while (insert.index < insert.lines.length) {
|
4894
4401
|
var line = insert.lines[insert.index++];
|
4895
4402
|
hunk.lines.push(line);
|
4896
4403
|
}
|
4897
4404
|
}
|
4405
|
+
|
4898
4406
|
function lib_collectChange(state) {
|
4899
4407
|
var ret = [],
|
4900
|
-
|
4408
|
+
operation = state.lines[state.index][0];
|
4409
|
+
|
4901
4410
|
while (state.index < state.lines.length) {
|
4902
|
-
var line = state.lines[state.index];
|
4411
|
+
var line = state.lines[state.index]; // Group additions that are immediately after subtractions and treat them as one "atomic" modify change.
|
4903
4412
|
|
4904
|
-
// Group additions that are immediately after subtractions and treat them as one "atomic" modify change.
|
4905
4413
|
if (operation === '-' && line[0] === '+') {
|
4906
4414
|
operation = '+';
|
4907
4415
|
}
|
4416
|
+
|
4908
4417
|
if (operation === line[0]) {
|
4909
4418
|
ret.push(line);
|
4910
4419
|
state.index++;
|
@@ -4912,35 +4421,39 @@ function lib_collectChange(state) {
|
|
4912
4421
|
break;
|
4913
4422
|
}
|
4914
4423
|
}
|
4424
|
+
|
4915
4425
|
return ret;
|
4916
4426
|
}
|
4427
|
+
|
4917
4428
|
function lib_collectContext(state, matchChanges) {
|
4918
4429
|
var changes = [],
|
4919
|
-
|
4920
|
-
|
4921
|
-
|
4922
|
-
|
4430
|
+
merged = [],
|
4431
|
+
matchIndex = 0,
|
4432
|
+
contextChanges = false,
|
4433
|
+
conflicted = false;
|
4434
|
+
|
4923
4435
|
while (matchIndex < matchChanges.length && state.index < state.lines.length) {
|
4924
4436
|
var change = state.lines[state.index],
|
4925
|
-
|
4437
|
+
match = matchChanges[matchIndex]; // Once we've hit our add, then we are done
|
4926
4438
|
|
4927
|
-
// Once we've hit our add, then we are done
|
4928
4439
|
if (match[0] === '+') {
|
4929
4440
|
break;
|
4930
4441
|
}
|
4442
|
+
|
4931
4443
|
contextChanges = contextChanges || change[0] !== ' ';
|
4932
4444
|
merged.push(match);
|
4933
|
-
matchIndex++;
|
4934
|
-
|
4935
|
-
// Consume any additions in the other block as a conflict to attempt
|
4445
|
+
matchIndex++; // Consume any additions in the other block as a conflict to attempt
|
4936
4446
|
// to pull in the remaining context after this
|
4447
|
+
|
4937
4448
|
if (change[0] === '+') {
|
4938
4449
|
conflicted = true;
|
4450
|
+
|
4939
4451
|
while (change[0] === '+') {
|
4940
4452
|
changes.push(change);
|
4941
4453
|
change = state.lines[++state.index];
|
4942
4454
|
}
|
4943
4455
|
}
|
4456
|
+
|
4944
4457
|
if (match.substr(1) === change.substr(1)) {
|
4945
4458
|
changes.push(change);
|
4946
4459
|
state.index++;
|
@@ -4948,35 +4461,44 @@ function lib_collectContext(state, matchChanges) {
|
|
4948
4461
|
conflicted = true;
|
4949
4462
|
}
|
4950
4463
|
}
|
4464
|
+
|
4951
4465
|
if ((matchChanges[matchIndex] || '')[0] === '+' && contextChanges) {
|
4952
4466
|
conflicted = true;
|
4953
4467
|
}
|
4468
|
+
|
4954
4469
|
if (conflicted) {
|
4955
4470
|
return changes;
|
4956
4471
|
}
|
4472
|
+
|
4957
4473
|
while (matchIndex < matchChanges.length) {
|
4958
4474
|
merged.push(matchChanges[matchIndex++]);
|
4959
4475
|
}
|
4476
|
+
|
4960
4477
|
return {
|
4961
4478
|
merged: merged,
|
4962
4479
|
changes: changes
|
4963
4480
|
};
|
4964
4481
|
}
|
4482
|
+
|
4965
4483
|
function lib_allRemoves(changes) {
|
4966
4484
|
return changes.reduce(function (prev, change) {
|
4967
4485
|
return prev && change[0] === '-';
|
4968
4486
|
}, true);
|
4969
4487
|
}
|
4488
|
+
|
4970
4489
|
function lib_skipRemoveSuperset(state, removeChanges, delta) {
|
4971
4490
|
for (var i = 0; i < delta; i++) {
|
4972
4491
|
var changeContent = removeChanges[removeChanges.length - delta + i].substr(1);
|
4492
|
+
|
4973
4493
|
if (state.lines[state.index + i] !== ' ' + changeContent) {
|
4974
4494
|
return false;
|
4975
4495
|
}
|
4976
4496
|
}
|
4497
|
+
|
4977
4498
|
state.index += delta;
|
4978
4499
|
return true;
|
4979
4500
|
}
|
4501
|
+
|
4980
4502
|
function lib_calcOldNewLineCount(lines) {
|
4981
4503
|
var oldLines = 0;
|
4982
4504
|
var newLines = 0;
|
@@ -4984,6 +4506,7 @@ function lib_calcOldNewLineCount(lines) {
|
|
4984
4506
|
if (typeof line !== 'string') {
|
4985
4507
|
var myCount = lib_calcOldNewLineCount(line.mine);
|
4986
4508
|
var theirCount = lib_calcOldNewLineCount(line.theirs);
|
4509
|
+
|
4987
4510
|
if (oldLines !== undefined) {
|
4988
4511
|
if (myCount.oldLines === theirCount.oldLines) {
|
4989
4512
|
oldLines += myCount.oldLines;
|
@@ -4991,6 +4514,7 @@ function lib_calcOldNewLineCount(lines) {
|
|
4991
4514
|
oldLines = undefined;
|
4992
4515
|
}
|
4993
4516
|
}
|
4517
|
+
|
4994
4518
|
if (newLines !== undefined) {
|
4995
4519
|
if (myCount.newLines === theirCount.newLines) {
|
4996
4520
|
newLines += myCount.newLines;
|
@@ -5002,6 +4526,7 @@ function lib_calcOldNewLineCount(lines) {
|
|
5002
4526
|
if (newLines !== undefined && (line[0] === '+' || line[0] === ' ')) {
|
5003
4527
|
newLines++;
|
5004
4528
|
}
|
4529
|
+
|
5005
4530
|
if (oldLines !== undefined && (line[0] === '-' || line[0] === ' ')) {
|
5006
4531
|
oldLines++;
|
5007
4532
|
}
|
@@ -5013,42 +4538,15 @@ function lib_calcOldNewLineCount(lines) {
|
|
5013
4538
|
};
|
5014
4539
|
}
|
5015
4540
|
|
5016
|
-
function reversePatch(structuredPatch) {
|
5017
|
-
if (Array.isArray(structuredPatch)) {
|
5018
|
-
return structuredPatch.map(reversePatch).reverse();
|
5019
|
-
}
|
5020
|
-
return _objectSpread2(_objectSpread2({}, structuredPatch), {}, {
|
5021
|
-
oldFileName: structuredPatch.newFileName,
|
5022
|
-
oldHeader: structuredPatch.newHeader,
|
5023
|
-
newFileName: structuredPatch.oldFileName,
|
5024
|
-
newHeader: structuredPatch.oldHeader,
|
5025
|
-
hunks: structuredPatch.hunks.map(function (hunk) {
|
5026
|
-
return {
|
5027
|
-
oldLines: hunk.newLines,
|
5028
|
-
oldStart: hunk.newStart,
|
5029
|
-
newLines: hunk.oldLines,
|
5030
|
-
newStart: hunk.oldStart,
|
5031
|
-
lines: hunk.lines.map(function (l) {
|
5032
|
-
if (l.startsWith('-')) {
|
5033
|
-
return "+".concat(l.slice(1));
|
5034
|
-
}
|
5035
|
-
if (l.startsWith('+')) {
|
5036
|
-
return "-".concat(l.slice(1));
|
5037
|
-
}
|
5038
|
-
return l;
|
5039
|
-
})
|
5040
|
-
};
|
5041
|
-
})
|
5042
|
-
});
|
5043
|
-
}
|
5044
|
-
|
5045
4541
|
// See: http://code.google.com/p/google-diff-match-patch/wiki/API
|
5046
4542
|
function lib_convertChangesToDMP(changes) {
|
5047
4543
|
var ret = [],
|
5048
|
-
|
5049
|
-
|
4544
|
+
change,
|
4545
|
+
operation;
|
4546
|
+
|
5050
4547
|
for (var i = 0; i < changes.length; i++) {
|
5051
4548
|
change = changes[i];
|
4549
|
+
|
5052
4550
|
if (change.added) {
|
5053
4551
|
operation = 1;
|
5054
4552
|
} else if (change.removed) {
|
@@ -5056,29 +4554,37 @@ function lib_convertChangesToDMP(changes) {
|
|
5056
4554
|
} else {
|
5057
4555
|
operation = 0;
|
5058
4556
|
}
|
4557
|
+
|
5059
4558
|
ret.push([operation, change.value]);
|
5060
4559
|
}
|
4560
|
+
|
5061
4561
|
return ret;
|
5062
4562
|
}
|
5063
4563
|
|
5064
4564
|
function lib_convertChangesToXML(changes) {
|
5065
4565
|
var ret = [];
|
4566
|
+
|
5066
4567
|
for (var i = 0; i < changes.length; i++) {
|
5067
4568
|
var change = changes[i];
|
4569
|
+
|
5068
4570
|
if (change.added) {
|
5069
4571
|
ret.push('<ins>');
|
5070
4572
|
} else if (change.removed) {
|
5071
4573
|
ret.push('<del>');
|
5072
4574
|
}
|
4575
|
+
|
5073
4576
|
ret.push(lib_escapeHTML(change.value));
|
4577
|
+
|
5074
4578
|
if (change.added) {
|
5075
4579
|
ret.push('</ins>');
|
5076
4580
|
} else if (change.removed) {
|
5077
4581
|
ret.push('</del>');
|
5078
4582
|
}
|
5079
4583
|
}
|
4584
|
+
|
5080
4585
|
return ret.join('');
|
5081
4586
|
}
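lib_convertChangesToXML above only looks at the added/removed/value fields of each change, wrapping insertions in &lt;ins&gt; and removals in &lt;del&gt; after HTML-escaping the value with lib_escapeHTML, defined just below. A small sketch with a hand-written change list (any lib_diff* result has the same shape):

```js
// Illustrative only; the change objects are written out by hand here.
var changes = [
  { value: 'Hello ' },
  { value: 'cruel ', removed: true },
  { value: 'world & co' }
];

lib_convertChangesToXML(changes);
// => 'Hello <del>cruel </del>world &amp; co'
```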
|
4587
|
+
|
5082
4588
|
function lib_escapeHTML(s) {
|
5083
4589
|
var n = s;
|
5084
4590
|
n = n.replace(/&/g, '&');
|
@@ -5415,7 +4921,6 @@ class FileListRenderer {
 
 
 
-
 const defaultLineByLineRendererConfig = Object.assign(Object.assign({}, defaultRenderConfig), { renderNothingWhenEmpty: false, matchingMaxComparisons: 2500, maxLineSizeInBlockForComparison: 200 });
 const genericTemplatesPath = 'generic';
 const line_by_line_renderer_baseTemplatesPath = 'line-by-line';
@@ -5546,7 +5051,7 @@ class LineByLineRenderer {
 }
 applyRematchMatching(oldLines, newLines, matcher) {
 const comparisons = oldLines.length * newLines.length;
-const maxLineSizeInBlock = max(oldLines.concat(newLines).map(elem => elem.content.length));
+const maxLineSizeInBlock = Math.max.apply(null, [0].concat(oldLines.concat(newLines).map(elem => elem.content.length)));
 const doMatching = comparisons < this.config.matchingMaxComparisons &&
 maxLineSizeInBlock < this.config.maxLineSizeInBlockForComparison &&
 (this.config.matching === 'lines' || this.config.matching === 'words');
@@ -5616,7 +5121,6 @@ class LineByLineRenderer {
 
 
 
-
 const defaultSideBySideRendererConfig = Object.assign(Object.assign({}, defaultRenderConfig), { renderNothingWhenEmpty: false, matchingMaxComparisons: 2500, maxLineSizeInBlockForComparison: 200 });
 const side_by_side_renderer_genericTemplatesPath = 'generic';
 const side_by_side_renderer_baseTemplatesPath = 'side-by-side';
@@ -5756,7 +5260,7 @@ class SideBySideRenderer {
 }
 applyRematchMatching(oldLines, newLines, matcher) {
 const comparisons = oldLines.length * newLines.length;
-const maxLineSizeInBlock = max(oldLines.concat(newLines).map(elem => elem.content.length));
+const maxLineSizeInBlock = Math.max.apply(null, [0].concat(oldLines.concat(newLines).map(elem => elem.content.length)));
 const doMatching = comparisons < this.config.matchingMaxComparisons &&
 maxLineSizeInBlock < this.config.maxLineSizeInBlockForComparison &&
 (this.config.matching === 'lines' || this.config.matching === 'words');
@@ -5886,7 +5390,7 @@ class HoganJsUtils {
 const template = this.preCompiledTemplates[templateKey];
 return template.render(params, partials, indent);
 }
-catch (
+catch (e) {
 throw new Error(`Could not find template to render '${templateKey}'`);
 }
 }
|