@js-ak/excel-toolbox 1.3.1 → 1.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/build/cjs/lib/template/template-fs.js +678 -197
- package/build/cjs/lib/template/utils/apply-replacements.js +26 -0
- package/build/cjs/lib/template/utils/check-row.js +6 -11
- package/build/cjs/lib/template/utils/column-index-to-letter.js +14 -3
- package/build/cjs/lib/template/utils/extract-xml-declaration.js +22 -0
- package/build/cjs/lib/template/utils/get-by-path.js +18 -0
- package/build/cjs/lib/template/utils/index.js +5 -0
- package/build/cjs/lib/template/utils/update-dimension.js +40 -0
- package/build/cjs/lib/template/utils/validate-worksheet-xml.js +217 -0
- package/build/cjs/lib/zip/create-with-stream.js +67 -107
- package/build/esm/lib/template/template-fs.js +678 -197
- package/build/esm/lib/template/utils/apply-replacements.js +22 -0
- package/build/esm/lib/template/utils/check-row.js +6 -11
- package/build/esm/lib/template/utils/column-index-to-letter.js +14 -3
- package/build/esm/lib/template/utils/extract-xml-declaration.js +19 -0
- package/build/esm/lib/template/utils/get-by-path.js +15 -0
- package/build/esm/lib/template/utils/index.js +5 -0
- package/build/esm/lib/template/utils/update-dimension.js +37 -0
- package/build/esm/lib/template/utils/validate-worksheet-xml.js +214 -0
- package/build/esm/lib/zip/create-with-stream.js +68 -108
- package/build/types/lib/template/template-fs.d.ts +24 -0
- package/build/types/lib/template/utils/apply-replacements.d.ts +13 -0
- package/build/types/lib/template/utils/check-row.d.ts +5 -10
- package/build/types/lib/template/utils/column-index-to-letter.d.ts +11 -3
- package/build/types/lib/template/utils/extract-xml-declaration.d.ts +14 -0
- package/build/types/lib/template/utils/get-by-path.d.ts +8 -0
- package/build/types/lib/template/utils/index.d.ts +5 -0
- package/build/types/lib/template/utils/update-dimension.d.ts +1 -0
- package/build/types/lib/template/utils/validate-worksheet-xml.d.ts +9 -0
- package/package.json +6 -4
package/build/esm/lib/template/utils/apply-replacements.js
@@ -0,0 +1,22 @@
+import { getByPath } from "./get-by-path.js";
+/**
+ * Replaces placeholders in the given content string with values from the replacements map.
+ *
+ * The function searches for placeholders in the format `${key}` within the content
+ * string, where `key` corresponds to a path in the replacements object.
+ * If a value is found for the key, it replaces the placeholder with the value.
+ * If no value is found, the original placeholder remains unchanged.
+ *
+ * @param content - The string containing placeholders to be replaced.
+ * @param replacements - An object where keys represent placeholder paths and values are the replacements.
+ * @returns A new string with placeholders replaced by corresponding values from the replacements object.
+ */
+export const applyReplacements = (content, replacements) => {
+    if (!content) {
+        return "";
+    }
+    return content.replace(/\$\{([^}]+)\}/g, (match, path) => {
+        const value = getByPath(replacements, path);
+        return value !== undefined ? String(value) : match;
+    });
+};
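
For context, the new `applyReplacements` helper resolves dot-separated paths via `getByPath`, so nested values can be substituted while unknown placeholders are left intact. A minimal usage sketch (the import path is illustrative, assuming the built ESM output):

```js
import { applyReplacements } from "@js-ak/excel-toolbox/build/esm/lib/template/utils/apply-replacements.js";

// "${user.name}" resolves via the dot path "user.name";
// "${user.missing}" has no value, so the placeholder stays as-is.
const result = applyReplacements(
    "Hello, ${user.name}! Code: ${user.missing}",
    { user: { name: "Alice" } },
);

console.log(result); // "Hello, Alice! Code: ${user.missing}"
```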
package/build/esm/lib/template/utils/check-row.js
@@ -1,19 +1,14 @@
 /**
- * Validates
+ * Validates an object representing a single row of data to ensure that its keys
+ * are valid Excel column references. Throws an error if any of the keys are
+ * invalid.
  *
- *
- *
- * not match this pattern, an error is thrown with a message indicating the
- * invalid cell reference.
- *
- * @param row - An object representing a row of data, where keys are cell
- * references and values are strings.
- *
- * @throws {Error} If any key in the row is not a valid column letter.
+ * @param row An object with string keys that represent the cell references and
+ * string values that represent the values of those cells.
  */
 export function checkRow(row) {
     for (const key of Object.keys(row)) {
-        if (!/^[A-Z]+$/i.test(key)) {
+        if (!/^[A-Z]+$/i.test(key) || !/^[A-Z]$|^[A-Z][A-Z]$|^[A-Z][A-Z][A-Z]$/i.test(key)) {
             throw new Error(`Invalid cell reference "${key}" in row. Only column letters (like "A", "B", "C") are allowed.`);
         }
     }
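
The tightened check now also rejects keys longer than three letters. A short sketch of the behavior (hypothetical import path; only the keys are validated, not the values):

```js
import { checkRow } from "@js-ak/excel-toolbox/build/esm/lib/template/utils/check-row.js";

checkRow({ A: "1", BC: "2", XFD: "3" }); // passes: 1-3 column letters per key
checkRow({ ABCD: "4" });                 // throws: more than three letters
checkRow({ A1: "5" });                   // throws: digits are not allowed in the key
```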
package/build/esm/lib/template/utils/column-index-to-letter.js
@@ -1,10 +1,21 @@
 /**
- * Converts a
+ * Converts a zero-based column index to its corresponding Excel column letter.
  *
- * @
- * @
+ * @throws Will throw an error if the input is not a positive integer.
+ * @param {number} index - The zero-based index of the column to convert.
+ * @returns {string} The corresponding Excel column letter.
+ *
+ * @example
+ * columnIndexToLetter(0); // returns "A"
+ * columnIndexToLetter(25); // returns "Z"
+ * columnIndexToLetter(26); // returns "AA"
+ * columnIndexToLetter(51); // returns "AZ"
+ * columnIndexToLetter(52); // returns "BA"
  */
 export function columnIndexToLetter(index) {
+    if (!Number.isInteger(index) || index < 0) {
+        throw new Error(`Invalid column index: ${index}. Must be a positive integer.`);
+    }
     let letters = "";
     while (index >= 0) {
         letters = String.fromCharCode((index % 26) + 65) + letters;
package/build/esm/lib/template/utils/extract-xml-declaration.js
@@ -0,0 +1,19 @@
+/**
+ * Extracts the XML declaration from a given XML string.
+ *
+ * The XML declaration is a string that looks like `<?xml ...?>` and is usually
+ * present at the beginning of an XML file. It contains information about the
+ * XML version, encoding, and standalone status.
+ *
+ * This function returns `null` if the input string does not have a valid XML
+ * declaration.
+ *
+ * @param xmlString - The XML string to extract the declaration from.
+ * @returns The extracted XML declaration string, or `null`.
+ */
+export function extractXmlDeclaration(xmlString) {
+    // const declarationRegex = /^<\?xml\s+[^?]+\?>/;
+    const declarationRegex = /^<\?xml\s+version\s*=\s*["'][^"']+["'](\s+(encoding|standalone)\s*=\s*["'][^"']+["'])*\s*\?>/;
+    const match = xmlString.match(declarationRegex);
+    return match ? match[0] : null;
+}
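
A quick sketch of what the declaration regex accepts and rejects (illustrative import path):

```js
import { extractXmlDeclaration } from "@js-ak/excel-toolbox/build/esm/lib/template/utils/extract-xml-declaration.js";

const xml = `<?xml version="1.0" encoding="UTF-8" standalone="yes"?><worksheet/>`;

console.log(extractXmlDeclaration(xml));
// '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'

console.log(extractXmlDeclaration("<worksheet/>"));
// null — no declaration at the start of the string
```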
package/build/esm/lib/template/utils/get-by-path.js
@@ -0,0 +1,15 @@
+/**
+ * Gets a value from an object by a given path.
+ *
+ * @param obj - The object to search.
+ * @param path - The path to the value, separated by dots.
+ * @returns The value at the given path, or undefined if not found.
+ */
+export function getByPath(obj, path) {
+    return path.split(".").reduce((acc, key) => {
+        if (acc && typeof acc === "object" && key in acc) {
+            return acc[key];
+        }
+        return undefined;
+    }, obj);
+}
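
Because the lookup is a plain `reduce` over object keys, array indices also work as path segments. A small sketch (illustrative import path):

```js
import { getByPath } from "@js-ak/excel-toolbox/build/esm/lib/template/utils/get-by-path.js";

const data = { order: { customer: { name: "ACME" }, items: [{ sku: "X1" }] } };

getByPath(data, "order.customer.name"); // "ACME"
getByPath(data, "order.items.0.sku");   // "X1" — array indices are plain keys
getByPath(data, "order.missing.key");   // undefined
```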
package/build/esm/lib/template/utils/index.js
@@ -1,11 +1,16 @@
+export * from "./apply-replacements.js";
 export * from "./check-row.js";
 export * from "./check-rows.js";
 export * from "./check-start-row.js";
 export * from "./column-index-to-letter.js";
 export * from "./escape-xml.js";
+export * from "./extract-xml-declaration.js";
+export * from "./get-by-path.js";
 export * from "./get-max-row-number.js";
 export * from "./get-rows-above.js";
 export * from "./get-rows-below.js";
 export * from "./parse-rows.js";
 export * from "./to-excel-column-object.js";
+export * from "./update-dimension.js";
+export * from "./validate-worksheet-xml.js";
 export * from "./write-rows-to-stream.js";
package/build/esm/lib/template/utils/update-dimension.js
@@ -0,0 +1,37 @@
+export function updateDimension(xml) {
+    const cellRefs = [...xml.matchAll(/<c r="([A-Z]+)(\d+)"/g)];
+    if (cellRefs.length === 0)
+        return xml;
+    let minCol = Infinity, maxCol = -Infinity;
+    let minRow = Infinity, maxRow = -Infinity;
+    for (const [, colStr, rowStr] of cellRefs) {
+        const col = columnLetterToNumber(colStr);
+        const row = parseInt(rowStr, 10);
+        if (col < minCol)
+            minCol = col;
+        if (col > maxCol)
+            maxCol = col;
+        if (row < minRow)
+            minRow = row;
+        if (row > maxRow)
+            maxRow = row;
+    }
+    const newRef = `${columnNumberToLetter(minCol)}${minRow}:${columnNumberToLetter(maxCol)}${maxRow}`;
+    return xml.replace(/<dimension ref="[^"]*"/, `<dimension ref="${newRef}"`);
+}
+function columnLetterToNumber(letters) {
+    let num = 0;
+    for (let i = 0; i < letters.length; i++) {
+        num = num * 26 + (letters.charCodeAt(i) - 64);
+    }
+    return num;
+}
+function columnNumberToLetter(num) {
+    let letters = "";
+    while (num > 0) {
+        const rem = (num - 1) % 26;
+        letters = String.fromCharCode(65 + rem) + letters;
+        num = Math.floor((num - 1) / 26);
+    }
+    return letters;
+}
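
`updateDimension` scans every `<c r="...">` reference and rewrites the `<dimension>` element to the bounding range of the cells actually present. A minimal sketch with a hand-written worksheet fragment (illustrative import path and data):

```js
import { updateDimension } from "@js-ak/excel-toolbox/build/esm/lib/template/utils/update-dimension.js";

const sheet =
    `<worksheet>` +
    `<dimension ref="A1:A1"/>` +
    `<sheetData>` +
    `<row r="2"><c r="B2"><v>1</v></c></row>` +
    `<row r="5"><c r="D5"><v>2</v></c></row>` +
    `</sheetData>` +
    `</worksheet>`;

// Cell references span B2..D5, so the stale dimension is rewritten.
console.log(updateDimension(sheet).includes(`<dimension ref="B2:D5"`)); // true
```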
package/build/esm/lib/template/utils/validate-worksheet-xml.js (comments and messages translated from Russian)
@@ -0,0 +1,214 @@
+export function validateWorksheetXml(xml) {
+    const createError = (message, details) => ({
+        error: {
+            details,
+            message,
+        },
+        isValid: false,
+    });
+    // 1. Check the basic XML structure
+    if (!xml.startsWith("<?xml")) {
+        return createError("XML must start with an <?xml> declaration");
+    }
+    if (!xml.includes("<worksheet") || !xml.includes("</worksheet>")) {
+        return createError("Root worksheet element not found");
+    }
+    // 2. Check that the required elements are present
+    const requiredElements = [
+        { name: "sheetViews", tag: "<sheetViews>" },
+        { name: "sheetFormatPr", tag: "<sheetFormatPr" },
+        { name: "cols", tag: "<cols>" },
+        { name: "sheetData", tag: "<sheetData>" },
+        { name: "mergeCells", tag: "<mergeCells" },
+    ];
+    for (const { name, tag } of requiredElements) {
+        if (!xml.includes(tag)) {
+            return createError(`Missing required element ${name}`);
+        }
+    }
+    // 3. Extract and validate sheetData
+    const sheetDataStart = xml.indexOf("<sheetData>");
+    const sheetDataEnd = xml.indexOf("</sheetData>");
+    if (sheetDataStart === -1 || sheetDataEnd === -1) {
+        return createError("Malformed sheetData structure");
+    }
+    const sheetDataContent = xml.substring(sheetDataStart + 10, sheetDataEnd);
+    const rows = sheetDataContent.split("</row>");
+    if (rows.length < 2) {
+        return createError("sheetData must contain at least one row");
+    }
+    // Collect information about every row and cell
+    const allRows = [];
+    const allCells = [];
+    let prevRowNum = 0;
+    for (const row of rows.slice(0, -1)) {
+        if (!row.includes("<row ")) {
+            return createError("row tag not found", `Fragment: ${row.substring(0, 50)}...`);
+        }
+        if (!row.includes("<c ")) {
+            return createError("Row contains no cells", `Row: ${row.substring(0, 50)}...`);
+        }
+        // Extract the row number
+        const rowNumMatch = row.match(/<row\s+r="(\d+)"/);
+        if (!rowNumMatch) {
+            return createError("Row number (attribute r) is missing", `Row: ${row.substring(0, 50)}...`);
+        }
+        const rowNum = parseInt(rowNumMatch[1]);
+        // Check row number uniqueness
+        if (allRows.includes(rowNum)) {
+            return createError("Duplicate row number found", `Row number: ${rowNum}`);
+        }
+        allRows.push(rowNum);
+        // Check row order (rows must be in ascending order)
+        if (rowNum <= prevRowNum) {
+            return createError("Row order is broken", `Current row: ${rowNum}, previous: ${prevRowNum}`);
+        }
+        prevRowNum = rowNum;
+        // Extract all cells in the row
+        const cells = row.match(/<c\s+r="([A-Z]+)(\d+)"/g) || [];
+        for (const cell of cells) {
+            const match = cell.match(/<c\s+r="([A-Z]+)(\d+)"/);
+            if (!match) {
+                return createError("Malformed cell format", `Cell: ${cell}`);
+            }
+            const col = match[1];
+            const cellRowNum = parseInt(match[2]);
+            // Check that the cell's row number matches its row
+            if (cellRowNum !== rowNum) {
+                return createError("Row number mismatch in cell", `Expected: ${rowNum}, found: ${cellRowNum} in cell ${col}${cellRowNum}`);
+            }
+            allCells.push({
+                col,
+                row: rowNum,
+            });
+        }
+    }
+    // 4. Validate mergeCells
+    const mergeCellsStart = xml.indexOf("<mergeCells");
+    const mergeCellsEnd = xml.indexOf("</mergeCells>");
+    if (mergeCellsStart === -1 || mergeCellsEnd === -1) {
+        return createError("Malformed mergeCells structure");
+    }
+    const mergeCellsContent = xml.substring(mergeCellsStart, mergeCellsEnd);
+    const countMatch = mergeCellsContent.match(/count="(\d+)"/);
+    if (!countMatch) {
+        return createError("Merged cell count (attribute count) is missing");
+    }
+    const mergeCellTags = mergeCellsContent.match(/<mergeCell\s+ref="([A-Z]+\d+:[A-Z]+\d+)"\s*\/>/g);
+    if (!mergeCellTags) {
+        return createError("No merged cells found");
+    }
+    // Check that the declared count matches the actual count
+    if (mergeCellTags.length !== parseInt(countMatch[1])) {
+        return createError("Merged cell count mismatch", `Expected: ${countMatch[1]}, found: ${mergeCellTags.length}`);
+    }
+    // Check for duplicate mergeCell entries
+    const mergeRefs = new Set();
+    const duplicates = new Set();
+    for (const mergeTag of mergeCellTags) {
+        const refMatch = mergeTag.match(/ref="([A-Z]+\d+:[A-Z]+\d+)"/);
+        if (!refMatch) {
+            return createError("Malformed merged cell format", `Tag: ${mergeTag}`);
+        }
+        const ref = refMatch[1];
+        if (mergeRefs.has(ref)) {
+            duplicates.add(ref);
+        }
+        else {
+            mergeRefs.add(ref);
+        }
+    }
+    if (duplicates.size > 0) {
+        return createError("Duplicate merged cell ranges found", `Duplicates: ${Array.from(duplicates).join(", ")}`);
+    }
+    // Check for overlapping merged ranges
+    const mergedRanges = Array.from(mergeRefs).map(ref => {
+        const [start, end] = ref.split(":");
+        return {
+            endCol: end.match(/[A-Z]+/)?.[0] || "",
+            endRow: parseInt(end.match(/\d+/)?.[0] || "0"),
+            startCol: start.match(/[A-Z]+/)?.[0] || "",
+            startRow: parseInt(start.match(/\d+/)?.[0] || "0"),
+        };
+    });
+    for (let i = 0; i < mergedRanges.length; i++) {
+        for (let j = i + 1; j < mergedRanges.length; j++) {
+            const a = mergedRanges[i];
+            const b = mergedRanges[j];
+            if (rangesIntersect(a, b)) {
+                return createError("Overlapping merged cell ranges found", `Overlapping: ${getRangeString(a)} and ${getRangeString(b)}`);
+            }
+        }
+    }
+    // 5. Validate dimension and check it against the actual data
+    const dimensionMatch = xml.match(/<dimension\s+ref="([A-Z]+\d+:[A-Z]+\d+)"\s*\/>/);
+    if (!dimensionMatch) {
+        return createError("Data range (dimension) is missing");
+    }
+    const [startCell, endCell] = dimensionMatch[1].split(":");
+    const startCol = startCell.match(/[A-Z]+/)?.[0];
+    const startRow = parseInt(startCell.match(/\d+/)?.[0] || "0");
+    const endCol = endCell.match(/[A-Z]+/)?.[0];
+    const endRow = parseInt(endCell.match(/\d+/)?.[0] || "0");
+    if (!startCol || !endCol || isNaN(startRow) || isNaN(endRow)) {
+        return createError("Malformed dimension format", `Dimension: ${dimensionMatch[1]}`);
+    }
+    const startColNum = colToNumber(startCol);
+    const endColNum = colToNumber(endCol);
+    // Check that every cell falls inside the dimension
+    for (const cell of allCells) {
+        const colNum = colToNumber(cell.col);
+        if (cell.row < startRow || cell.row > endRow) {
+            return createError("Cell is outside the declared range (by row)", `Cell: ${cell.col}${cell.row}, dimension: ${dimensionMatch[1]}`);
+        }
+        if (colNum < startColNum || colNum > endColNum) {
+            return createError("Cell is outside the declared range (by column)", `Cell: ${cell.col}${cell.row}, dimension: ${dimensionMatch[1]}`);
+        }
+    }
+    // 6. Additional check: every mergeCell references existing cells
+    for (const mergeTag of mergeCellTags) {
+        const refMatch = mergeTag.match(/ref="([A-Z]+\d+:[A-Z]+\d+)"/);
+        if (!refMatch) {
+            return createError("Malformed merged cell format", `Tag: ${mergeTag}`);
+        }
+        const [cell1, cell2] = refMatch[1].split(":");
+        const cell1Col = cell1.match(/[A-Z]+/)?.[0];
+        const cell1Row = parseInt(cell1.match(/\d+/)?.[0] || "0");
+        const cell2Col = cell2.match(/[A-Z]+/)?.[0];
+        const cell2Row = parseInt(cell2.match(/\d+/)?.[0] || "0");
+        if (!cell1Col || !cell2Col || isNaN(cell1Row) || isNaN(cell2Row)) {
+            return createError("Malformed merged cell coordinates", `Merge: ${refMatch[1]}`);
+        }
+        // Check that the merged corner cells exist
+        const cell1Exists = allCells.some(c => c.row === cell1Row && c.col === cell1Col);
+        const cell2Exists = allCells.some(c => c.row === cell2Row && c.col === cell2Col);
+        if (!cell1Exists || !cell2Exists) {
+            return createError("Merge references non-existent cells", `Merge: ${refMatch[1]}, missing: ${!cell1Exists ? `${cell1Col}${cell1Row}` : `${cell2Col}${cell2Row}`}`);
+        }
+    }
+    return { isValid: true };
+}
+// Helper functions for overlap checks
+function rangesIntersect(a, b) {
+    const aStartColNum = colToNumber(a.startCol);
+    const aEndColNum = colToNumber(a.endCol);
+    const bStartColNum = colToNumber(b.startCol);
+    const bEndColNum = colToNumber(b.endCol);
+    // Check row overlap
+    const rowsIntersect = !(a.endRow < b.startRow || a.startRow > b.endRow);
+    // Check column overlap
+    const colsIntersect = !(aEndColNum < bStartColNum || aStartColNum > bEndColNum);
+    return rowsIntersect && colsIntersect;
+}
+function getRangeString(range) {
+    return `${range.startCol}${range.startRow}:${range.endCol}${range.endRow}`;
+}
+// Converts column letters to a column number
+function colToNumber(col) {
+    let num = 0;
+    for (let i = 0; i < col.length; i++) {
+        num = num * 26 + (col.charCodeAt(i) - 64);
+    }
+    return num;
+}
+;
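
The validator returns either `{ isValid: true }` or `{ isValid: false, error: { message, details } }`. A usage sketch (the worksheet path and import path are illustrative):

```js
import { readFile } from "node:fs/promises";
import { validateWorksheetXml } from "@js-ak/excel-toolbox/build/esm/lib/template/utils/validate-worksheet-xml.js";

const xml = await readFile("xl/worksheets/sheet1.xml", "utf8");
const result = validateWorksheetXml(xml);

if (!result.isValid) {
    // message names the failed check; details (when present) pinpoints the offending fragment
    console.error(result.error.message, result.error.details ?? "");
}
```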
package/build/esm/lib/zip/create-with-stream.js
@@ -1,5 +1,5 @@
 import * as path from "node:path";
-import { Transform } from "node:stream";
+import { PassThrough, Transform } from "node:stream";
 import { createReadStream } from "node:fs";
 import { pipeline } from "node:stream/promises";
 import zlib from "node:zlib";
@@ -17,20 +17,32 @@ import { CENTRAL_DIR_HEADER_SIG, END_OF_CENTRAL_DIR_SIG, LOCAL_FILE_HEADER_SIG,
  * @throws {Error} - If the writable stream emits an error.
  */
 export async function createWithStream(fileKeys, destination, output) {
+    // Stores central directory records
     const centralDirectory = [];
+    // Tracks the current offset in the output stream
     let offset = 0;
     for (const filename of fileKeys.sort((a, b) => a.localeCompare(b))) {
+        // Prevent directory traversal
         if (filename.includes("..")) {
             throw new Error(`Invalid filename: ${filename}`);
         }
+        // Construct absolute path to the file
        const fullPath = path.join(destination, ...filename.split("/"));
+        // Convert filename to UTF-8 buffer
        const fileNameBuf = Buffer.from(filename, "utf8");
+        // Get modification time in DOS format
        const modTime = dosTime(new Date());
+        // Read file as stream
        const source = createReadStream(fullPath);
+        // Create CRC32 transform stream
        const crc32 = crc32Stream();
+        // Create raw deflate stream (no zlib headers)
        const deflater = zlib.createDeflateRaw();
+        // Uncompressed size counter
        let uncompSize = 0;
+        // Compressed size counter
        let compSize = 0;
+        // Store compressed output data
        const compressedChunks = [];
        const sizeCounter = new Transform({
            transform(chunk, _enc, cb) {
@@ -38,135 +50,83 @@ export async function createWithStream(fileKeys, destination, output) {
                cb(null, chunk);
            },
        });
-        const collectCompressed = new
-
-
-
-
-
+        const collectCompressed = new PassThrough();
+        collectCompressed.on("data", chunk => {
+            // Count compressed bytes
+            compSize += chunk.length;
+            // Save compressed chunk
+            compressedChunks.push(chunk);
        });
-        //
-        // deflater.on("finish", () => { console.log("deflater finished path:", fullPath, "uncompSize:", uncompSize, "compSize:", compSize); });
-        // deflater.on("error", (err) => { console.log("deflater error path:", fullPath, "error:", err); });
-        // deflater.on("close", () => { console.log("deflater closed path:", fullPath); });
-        // deflater.on("pipe", (src) => { console.log("deflater pipe path:", fullPath); });
-        // deflater.on("unpipe", (src) => { console.log("deflater unpipe path:", fullPath); });
-        // deflater.on("drain", () => { console.log("deflater drain path:", fullPath); });
-        // deflater.on("pause", () => { console.log("deflater pause path:", fullPath); });
-        // deflater.on("resume", () => { console.log("deflater resume path:", fullPath); });
-        // deflater.on("end", () => console.log("deflater ended, path:", fullPath));
-        // source.on("data", (chunk) => { console.log("source data path:", fullPath, "length:", chunk.length); });
-        // source.on("finish", () => { console.log("source finished path:", fullPath, "uncompSize:", uncompSize, "compSize:", compSize); });
-        // source.on("error", (err) => { console.log("source error path:", fullPath, "error:", err); });
-        // source.on("close", () => { console.log("source closed path:", fullPath); });
-        // source.on("pipe", (src) => { console.log("source pipe path:", fullPath); });
-        // source.on("unpipe", (src) => { console.log("source unpipe path:", fullPath); });
-        // source.on("drain", () => { console.log("source drain path:", fullPath); });
-        // source.on("pause", () => { console.log("source pause path:", fullPath); });
-        // source.on("resume", () => { console.log("source resume path:", fullPath); });
-        // source.on("end", () => console.log("source ended, path:", fullPath));
-        // sizeCounter.on("data", (chunk) => { console.log("sizeCounter data path:", fullPath, "length:", chunk.length); });
-        // sizeCounter.on("finish", () => { console.log("sizeCounter finished path:", fullPath, "uncompSize:", uncompSize, "compSize:", compSize); });
-        // sizeCounter.on("error", (err) => { console.log("sizeCounter error path:", fullPath, "error:", err); });
-        // sizeCounter.on("close", () => { console.log("sizeCounter closed path:", fullPath); });
-        // sizeCounter.on("pipe", (src) => { console.log("sizeCounter pipe path:", fullPath); });
-        // sizeCounter.on("unpipe", (src) => { console.log("sizeCounter unpipe path:", fullPath); });
-        // sizeCounter.on("drain", () => { console.log("sizeCounter drain path:", fullPath); });
-        // sizeCounter.on("pause", () => { console.log("sizeCounter pause path:", fullPath); });
-        // sizeCounter.on("resume", () => { console.log("sizeCounter resume path:", fullPath); });
-        // sizeCounter.on("end", () => console.log("sizeCounter ended, path:", fullPath));
-        // crc32.on("data", (chunk) => { console.log("crc32 data path:", fullPath, "length:", chunk.length); });
-        // crc32.on("finish", () => { console.log("crc32 finished path:", fullPath, "uncompSize:", uncompSize, "compSize:", compSize); });
-        // crc32.on("error", (err) => { console.log("crc32 error path:", fullPath, "error:", err); });
-        // crc32.on("close", () => { console.log("crc32 closed path:", fullPath); });
-        // crc32.on("pipe", (src) => { console.log("crc32 pipe path:", fullPath); });
-        // crc32.on("unpipe", (src) => { console.log("crc32 unpipe path:", fullPath); });
-        // crc32.on("drain", () => { console.log("crc32 drain path:", fullPath); });
-        // crc32.on("pause", () => { console.log("crc32 pause path:", fullPath); });
-        // crc32.on("resume", () => { console.log("crc32 resume path:", fullPath); });
-        // crc32.on("end", () => console.log("crc32 ended, path:", fullPath));
-        collectCompressed.on("data", ( /* chunk */) => { });
-        // collectCompressed.on("finish", () => { console.log("collectCompressed finished path:", fullPath, "uncompSize:", uncompSize, "compSize:", compSize); });
-        // collectCompressed.on("error", (err) => { console.log("collectCompressed error path:", fullPath, "error:", err); });
-        // collectCompressed.on("close", () => { console.log("collectCompressed closed path:", fullPath); });
-        // collectCompressed.on("pipe", (src) => { console.log("collectCompressed pipe path:", fullPath); });
-        // collectCompressed.on("unpipe", (src) => { console.log("collectCompressed unpipe path:", fullPath); });
-        // collectCompressed.on("drain", () => { console.log("collectCompressed drain path:", fullPath); });
-        // collectCompressed.on("pause", () => { console.log("collectCompressed pause path:", fullPath); });
-        // collectCompressed.on("resume", () => { console.log("collectCompressed resume path:", fullPath); });
-        // collectCompressed.on("end", () => console.log("collectCompressed ended, path:", fullPath));
-        // deflater.on("readable", () => {
-        //     console.log("deflater readable path:", fullPath);
-        // });
+        // Run all transforms in pipeline: read -> count size -> CRC -> deflate -> collect compressed
        await pipeline(source, sizeCounter, crc32, deflater, collectCompressed);
-        //
-        // source
-        //     .pipe(sizeCounter)
-        //     .pipe(crc32)
-        //     .pipe(deflater)
-        //     .pipe(collectCompressed)
-        //     .on("finish", resolve)
-        //     .on("error", reject);
-        // source.on("error", reject);
-        // deflater.on("error", reject);
-        // });
+        // Get final CRC32 value
        const crc = crc32.digest();
+        // Concatenate all compressed chunks into a single buffer
        const compressed = Buffer.concat(compressedChunks);
+        // Create local file header followed by compressed content
        const localHeader = Buffer.concat([
-            LOCAL_FILE_HEADER_SIG,
-            toBytes(20, 2),
-            toBytes(0, 2),
-            toBytes(8, 2),
-            modTime,
-            toBytes(crc, 4),
-            toBytes(compSize, 4),
-            toBytes(uncompSize, 4),
-            toBytes(fileNameBuf.length, 2),
-            toBytes(0, 2),
-            fileNameBuf,
-            compressed,
+            LOCAL_FILE_HEADER_SIG, // Local file header signature
+            toBytes(20, 2), // Version needed to extract
+            toBytes(0, 2), // General purpose bit flag
+            toBytes(8, 2), // Compression method (deflate)
+            modTime, // File modification time and date
+            toBytes(crc, 4), // CRC-32 checksum
+            toBytes(compSize, 4), // Compressed size
+            toBytes(uncompSize, 4), // Uncompressed size
+            toBytes(fileNameBuf.length, 2), // Filename length
+            toBytes(0, 2), // Extra field length
+            fileNameBuf, // Filename
+            compressed, // Compressed file data
        ]);
+        // Write local file header and data to output
        await new Promise((resolve, reject) => {
            output.write(localHeader, err => err ? reject(err) : resolve());
        });
+        // Create central directory entry for this file
        const centralEntry = Buffer.concat([
-            CENTRAL_DIR_HEADER_SIG,
-            toBytes(20, 2),
-            toBytes(20, 2),
-            toBytes(0, 2),
-            toBytes(8, 2),
-            modTime,
-            toBytes(crc, 4),
-            toBytes(compSize, 4),
-            toBytes(uncompSize, 4),
-            toBytes(fileNameBuf.length, 2),
-            toBytes(0, 2),
-            toBytes(0, 2),
-            toBytes(0, 2),
-            toBytes(0, 2),
-            toBytes(0, 4),
-            toBytes(offset, 4),
-            fileNameBuf,
+            CENTRAL_DIR_HEADER_SIG, // Central directory file header signature
+            toBytes(20, 2), // Version made by
+            toBytes(20, 2), // Version needed to extract
+            toBytes(0, 2), // General purpose bit flag
+            toBytes(8, 2), // Compression method
+            modTime, // File modification time and date
+            toBytes(crc, 4), // CRC-32 checksum
+            toBytes(compSize, 4), // Compressed size
+            toBytes(uncompSize, 4), // Uncompressed size
+            toBytes(fileNameBuf.length, 2), // Filename length
+            toBytes(0, 2), // Extra field length
+            toBytes(0, 2), // File comment length
+            toBytes(0, 2), // Disk number start
+            toBytes(0, 2), // Internal file attributes
+            toBytes(0, 4), // External file attributes
+            toBytes(offset, 4), // Offset of local header
+            fileNameBuf, // Filename
        ]);
+        // Store for later
        centralDirectory.push(centralEntry);
+        // Update offset after writing this entry
        offset += localHeader.length;
    }
+    // Total size of central directory
    const centralDirSize = centralDirectory.reduce((sum, entry) => sum + entry.length, 0);
+    // Start of central directory
    const centralDirOffset = offset;
+    // Write each central directory entry to output
    for (const entry of centralDirectory) {
        await new Promise((resolve, reject) => {
            output.write(entry, err => err ? reject(err) : resolve());
        });
    }
+    // Create and write end of central directory record
    const endRecord = Buffer.concat([
-        END_OF_CENTRAL_DIR_SIG,
-        toBytes(0, 2),
-        toBytes(0, 2),
-        toBytes(centralDirectory.length, 2),
-        toBytes(centralDirectory.length, 2),
-        toBytes(centralDirSize, 4),
-        toBytes(centralDirOffset, 4),
-        toBytes(0, 2),
+        END_OF_CENTRAL_DIR_SIG, // End of central directory signature
+        toBytes(0, 2), // Number of this disk
+        toBytes(0, 2), // Disk with start of central directory
+        toBytes(centralDirectory.length, 2), // Total entries on this disk
+        toBytes(centralDirectory.length, 2), // Total entries overall
+        toBytes(centralDirSize, 4), // Size of central directory
+        toBytes(centralDirOffset, 4), // Offset of start of central directory
+        toBytes(0, 2), // ZIP file comment length
    ]);
    await new Promise((resolve, reject) => {
        output.write(endRecord, err => err ? reject(err) : resolve());
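
The rewrite replaces the hand-rolled compressed-chunk collector and the commented-out debug listeners with a `PassThrough` plus a single `pipeline()` call. A standalone sketch of the same measuring idea using only Node built-ins (this is not the library's code; the CRC32 transform is internal to the package and is omitted here, and the file name is hypothetical):

```js
import { PassThrough, Transform } from "node:stream";
import { pipeline } from "node:stream/promises";
import { createReadStream } from "node:fs";
import zlib from "node:zlib";

// Count bytes flowing through without modifying them.
let uncompSize = 0;
const sizeCounter = new Transform({
    transform(chunk, _enc, cb) {
        uncompSize += chunk.length;
        cb(null, chunk);
    },
});

// Collect the raw-deflate output and track its size.
let compSize = 0;
const chunks = [];
const collect = new PassThrough();
collect.on("data", chunk => {
    compSize += chunk.length;
    chunks.push(chunk);
});

await pipeline(createReadStream("some-file.xml"), sizeCounter, zlib.createDeflateRaw(), collect);
console.log({ uncompSize, compSize, compressedBytes: Buffer.concat(chunks).length });
```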
package/build/types/lib/template/template-fs.d.ts
@@ -29,6 +29,30 @@ export declare class TemplateFs {
      * @experimental This API is experimental and might change in future versions.
      */
     constructor(fileKeys: Set<string>, destination: string);
+    /**
+     * Copies a sheet from the template to a new name.
+     *
+     * @param {string} sourceName - The name of the sheet to copy.
+     * @param {string} newName - The new name for the sheet.
+     * @returns {Promise<void>}
+     * @throws {Error} If the sheet with the source name does not exist.
+     * @throws {Error} If a sheet with the new name already exists.
+     * @experimental This API is experimental and might change in future versions.
+     */
+    copySheet(sourceName: string, newName: string): Promise<void>;
+    /**
+     * Replaces placeholders in the given sheet with values from the replacements map.
+     *
+     * The function searches for placeholders in the format `${key}` within the sheet
+     * content, where `key` corresponds to a path in the replacements object.
+     * If a value is found for the key, it replaces the placeholder with the value.
+     * If no value is found, the original placeholder remains unchanged.
+     *
+     * @param sheetName - The name of the sheet to be replaced.
+     * @param replacements - An object where keys represent placeholder paths and values are the replacements.
+     * @returns A promise that resolves when the substitution is complete.
+     */
+    substitute(sheetName: string, replacements: Record<string, unknown>): Promise<void>;
     /**
      * Inserts rows into a specific sheet in the template.
      *